1 /**************************************************************************
3 Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>. All rights reserved.
5 Copyright (c) 2001-2003, Intel Corporation
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
34 ***************************************************************************/
36 /*$FreeBSD: src/sys/dev/em/if_em.c,v 1.2.2.15 2003/06/09 22:10:15 pdeuskar Exp $*/
37 /*$DragonFly: src/sys/dev/netif/em/if_em.c,v 1.22 2004/11/22 00:46:14 dillon Exp $*/
41 /*********************************************************************
42 * Set this to one to display debug statistics
43 *********************************************************************/
44 int em_display_debug_stats = 0;
46 /*********************************************************************
48 *********************************************************************/
50 char em_driver_version[] = "1.7.25";
53 /*********************************************************************
56 * Used by probe to select devices to load on
57 * Last field stores an index into em_strings
58 * Last entry must be all 0s
60 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61 *********************************************************************/
63 static em_vendor_info_t em_vendor_info_array[] =
65 /* Intel(R) PRO/1000 Network Connection */
66 { 0x8086, 0x1000, PCI_ANY_ID, PCI_ANY_ID, 0},
67 { 0x8086, 0x1001, PCI_ANY_ID, PCI_ANY_ID, 0},
68 { 0x8086, 0x1004, PCI_ANY_ID, PCI_ANY_ID, 0},
69 { 0x8086, 0x1008, PCI_ANY_ID, PCI_ANY_ID, 0},
70 { 0x8086, 0x1009, PCI_ANY_ID, PCI_ANY_ID, 0},
71 { 0x8086, 0x100C, PCI_ANY_ID, PCI_ANY_ID, 0},
72 { 0x8086, 0x100D, PCI_ANY_ID, PCI_ANY_ID, 0},
73 { 0x8086, 0x100E, PCI_ANY_ID, PCI_ANY_ID, 0},
74 { 0x8086, 0x100F, PCI_ANY_ID, PCI_ANY_ID, 0},
75 { 0x8086, 0x1010, PCI_ANY_ID, PCI_ANY_ID, 0},
76 { 0x8086, 0x1011, PCI_ANY_ID, PCI_ANY_ID, 0},
77 { 0x8086, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0},
78 { 0x8086, 0x1013, PCI_ANY_ID, PCI_ANY_ID, 0},
79 { 0x8086, 0x1014, PCI_ANY_ID, PCI_ANY_ID, 0},
80 { 0x8086, 0x1015, PCI_ANY_ID, PCI_ANY_ID, 0},
81 { 0x8086, 0x1016, PCI_ANY_ID, PCI_ANY_ID, 0},
82 { 0x8086, 0x1017, PCI_ANY_ID, PCI_ANY_ID, 0},
83 { 0x8086, 0x1018, PCI_ANY_ID, PCI_ANY_ID, 0},
84 { 0x8086, 0x1019, PCI_ANY_ID, PCI_ANY_ID, 0},
85 { 0x8086, 0x101A, PCI_ANY_ID, PCI_ANY_ID, 0},
86 { 0x8086, 0x101D, PCI_ANY_ID, PCI_ANY_ID, 0},
87 { 0x8086, 0x101E, PCI_ANY_ID, PCI_ANY_ID, 0},
88 { 0x8086, 0x1026, PCI_ANY_ID, PCI_ANY_ID, 0},
89 { 0x8086, 0x1027, PCI_ANY_ID, PCI_ANY_ID, 0},
90 { 0x8086, 0x1028, PCI_ANY_ID, PCI_ANY_ID, 0},
91 { 0x8086, 0x1075, PCI_ANY_ID, PCI_ANY_ID, 0},
92 { 0x8086, 0x1076, PCI_ANY_ID, PCI_ANY_ID, 0},
93 { 0x8086, 0x1077, PCI_ANY_ID, PCI_ANY_ID, 0},
94 { 0x8086, 0x1078, PCI_ANY_ID, PCI_ANY_ID, 0},
95 { 0x8086, 0x1079, PCI_ANY_ID, PCI_ANY_ID, 0},
96 { 0x8086, 0x107A, PCI_ANY_ID, PCI_ANY_ID, 0},
97 { 0x8086, 0x107B, PCI_ANY_ID, PCI_ANY_ID, 0},
98 /* required last entry */
102 /*********************************************************************
103 * Table of branding strings for all supported NICs.
104 *********************************************************************/
106 static const char *em_strings[] = {
107 "Intel(R) PRO/1000 Network Connection"
110 /*********************************************************************
111 * Function prototypes
112 *********************************************************************/
113 static int em_probe(device_t);
114 static int em_attach(device_t);
115 static int em_detach(device_t);
116 static int em_shutdown(device_t);
117 static void em_intr(void *);
118 static void em_start(struct ifnet *);
119 static int em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
120 static void em_watchdog(struct ifnet *);
121 static void em_init(void *);
122 static void em_stop(void *);
123 static void em_media_status(struct ifnet *, struct ifmediareq *);
124 static int em_media_change(struct ifnet *);
125 static void em_identify_hardware(struct adapter *);
126 static void em_local_timer(void *);
127 static int em_hardware_init(struct adapter *);
128 static void em_setup_interface(device_t, struct adapter *);
129 static int em_setup_transmit_structures(struct adapter *);
130 static void em_initialize_transmit_unit(struct adapter *);
131 static int em_setup_receive_structures(struct adapter *);
132 static void em_initialize_receive_unit(struct adapter *);
133 static void em_enable_intr(struct adapter *);
134 static void em_disable_intr(struct adapter *);
135 static void em_free_transmit_structures(struct adapter *);
136 static void em_free_receive_structures(struct adapter *);
137 static void em_update_stats_counters(struct adapter *);
138 static void em_clean_transmit_interrupts(struct adapter *);
139 static int em_allocate_receive_structures(struct adapter *);
140 static int em_allocate_transmit_structures(struct adapter *);
141 static void em_process_receive_interrupts(struct adapter *, int);
142 static void em_receive_checksum(struct adapter *, struct em_rx_desc *,
144 static void em_transmit_checksum_setup(struct adapter *, struct mbuf *,
145 uint32_t *, uint32_t *);
146 static void em_set_promisc(struct adapter *);
147 static void em_disable_promisc(struct adapter *);
148 static void em_set_multi(struct adapter *);
149 static void em_print_hw_stats(struct adapter *);
150 static void em_print_link_status(struct adapter *);
151 static int em_get_buf(int i, struct adapter *, struct mbuf *);
152 static void em_enable_vlans(struct adapter *);
153 static int em_encap(struct adapter *, struct mbuf *);
154 static void em_smartspeed(struct adapter *);
155 static int em_82547_fifo_workaround(struct adapter *, int);
156 static void em_82547_update_fifo_head(struct adapter *, int);
157 static int em_82547_tx_fifo_reset(struct adapter *);
158 static void em_82547_move_tail(void *arg);
159 static int em_dma_malloc(struct adapter *, bus_size_t,
160 struct em_dma_alloc *, int);
161 static void em_dma_free(struct adapter *, struct em_dma_alloc *);
162 static void em_print_debug_info(struct adapter *);
163 static int em_is_valid_ether_addr(uint8_t *);
164 static int em_sysctl_stats(SYSCTL_HANDLER_ARGS);
165 static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
166 static uint32_t em_fill_descriptors(uint64_t address, uint32_t length,
167 PDESC_ARRAY desc_array);
168 static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
169 static int em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
170 static void em_add_int_delay_sysctl(struct adapter *, const char *,
172 struct em_int_delay_info *, int, int);
174 /*********************************************************************
175 * FreeBSD Device Interface Entry Points
176 *********************************************************************/
178 static device_method_t em_methods[] = {
179 /* Device interface */
180 DEVMETHOD(device_probe, em_probe),
181 DEVMETHOD(device_attach, em_attach),
182 DEVMETHOD(device_detach, em_detach),
183 DEVMETHOD(device_shutdown, em_shutdown),
187 static driver_t em_driver = {
188 "em", em_methods, sizeof(struct adapter),
191 static devclass_t em_devclass;
193 DECLARE_DUMMY_MODULE(if_em);
194 DRIVER_MODULE(if_em, pci, em_driver, em_devclass, 0, 0);
196 /*********************************************************************
197 * Tunable default values.
198 *********************************************************************/
200 #define E1000_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
201 #define E1000_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
203 static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
204 static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
205 static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
206 static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
207 static int em_int_throttle_ceil = 10000;
209 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
210 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
211 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
212 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
213 TUNABLE_INT("hw.em.int_throttle_ceil", &em_int_throttle_ceil);
215 /*********************************************************************
216 * Device identification routine
218 * em_probe determines if the driver should be loaded on
219 * adapter based on PCI vendor/device id of the adapter.
221 * return 0 on success, positive on failure
222 *********************************************************************/
/*
 * em_probe: PCI probe entry point.  Matches the device's vendor id
 * (must be Intel, EM_VENDOR_ID) and device/subvendor/subdevice ids
 * against em_vendor_info_array, and on a hit sets the device
 * description from em_strings plus the driver version.
 *
 * NOTE(review): this listing has elided lines (the embedded line
 * numbers jump, e.g. 238->241); the "static int" line, braces and
 * the return statements are missing — restore from pristine source.
 */
225 em_probe(device_t dev)
227 em_vendor_info_t *ent;
229 uint16_t pci_vendor_id = 0;
230 uint16_t pci_device_id = 0;
231 uint16_t pci_subvendor_id = 0;
232 uint16_t pci_subdevice_id = 0;
233 char adapter_name[60];
235 INIT_DEBUGOUT("em_probe: begin");
237 pci_vendor_id = pci_get_vendor(dev);
/* Bail out early for non-Intel devices (elided return follows). */
238 if (pci_vendor_id != EM_VENDOR_ID)
241 pci_device_id = pci_get_device(dev);
242 pci_subvendor_id = pci_get_subvendor(dev);
243 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the table; the all-zero terminator entry stops the scan. */
245 ent = em_vendor_info_array;
246 while (ent->vendor_id != 0) {
247 if ((pci_vendor_id == ent->vendor_id) &&
248 (pci_device_id == ent->device_id) &&
/* PCI_ANY_ID entries act as wildcards for subvendor/subdevice. */
250 ((pci_subvendor_id == ent->subvendor_id) ||
251 (ent->subvendor_id == PCI_ANY_ID)) &&
253 ((pci_subdevice_id == ent->subdevice_id) ||
254 (ent->subdevice_id == PCI_ANY_ID))) {
255 snprintf(adapter_name, sizeof(adapter_name),
256 "%s, Version - %s", em_strings[ent->index],
258 device_set_desc_copy(dev, adapter_name);
267 /*********************************************************************
268 * Device initialization routine
270 * The attach entry point is called when the driver is being loaded.
271 * This routine identifies the type of hardware, allocates all resources
272 * and initializes the hardware.
274 * return 0 on success, positive on failure
275 *********************************************************************/
/*
 * em_attach: device attach entry point.  Identifies the hardware,
 * creates sysctl nodes and tunable interrupt-delay knobs, allocates
 * bus resources (memory BAR, optional I/O BAR on >82543, IRQ),
 * allocates the TX/RX descriptor rings via em_dma_malloc(), resets
 * the hardware, reads/validates the MAC address, registers the
 * network interface and the interrupt handler.
 *
 * NOTE(review): numerous lines are elided in this listing (error
 * returns, closing braces, "goto fail" paths); do not build from it.
 */
278 em_attach(device_t dev)
280 struct adapter *adapter;
285 INIT_DEBUGOUT("em_attach: begin");
287 adapter = device_get_softc(dev);
289 bzero(adapter, sizeof(struct adapter));
291 callout_init(&adapter->timer);
292 callout_init(&adapter->tx_fifo_timer);
295 adapter->osdep.dev = dev;
/* Per-device sysctl tree under hw.<nameunit>. */
298 sysctl_ctx_init(&adapter->sysctl_ctx);
299 adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
300 SYSCTL_STATIC_CHILDREN(_hw),
302 device_get_nameunit(dev),
306 if (adapter->sysctl_tree == NULL) {
311 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
312 SYSCTL_CHILDREN(adapter->sysctl_tree),
313 OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
315 em_sysctl_debug_info, "I", "Debug Information");
317 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
318 SYSCTL_CHILDREN(adapter->sysctl_tree),
319 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
321 em_sysctl_stats, "I", "Statistics");
323 /* Determine hardware revision */
324 em_identify_hardware(adapter);
326 /* Set up some sysctls for the tunable interrupt delays */
327 em_add_int_delay_sysctl(adapter, "rx_int_delay",
328 "receive interrupt delay in usecs",
329 &adapter->rx_int_delay,
330 E1000_REG_OFFSET(&adapter->hw, RDTR),
331 em_rx_int_delay_dflt);
332 em_add_int_delay_sysctl(adapter, "tx_int_delay",
333 "transmit interrupt delay in usecs",
334 &adapter->tx_int_delay,
335 E1000_REG_OFFSET(&adapter->hw, TIDV),
336 em_tx_int_delay_dflt);
/* Absolute-delay registers (RADV/TADV) only exist on 82540+. */
337 if (adapter->hw.mac_type >= em_82540) {
338 em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
339 "receive interrupt delay limit in usecs",
340 &adapter->rx_abs_int_delay,
341 E1000_REG_OFFSET(&adapter->hw, RADV),
342 em_rx_abs_int_delay_dflt);
343 em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
344 "transmit interrupt delay limit in usecs",
345 &adapter->tx_abs_int_delay,
346 E1000_REG_OFFSET(&adapter->hw, TADV),
347 em_tx_abs_int_delay_dflt);
348 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
349 SYSCTL_CHILDREN(adapter->sysctl_tree),
350 OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW,
351 adapter, 0, em_sysctl_int_throttle, "I", NULL);
354 /* Parameters (to be read from user) */
355 adapter->num_tx_desc = EM_MAX_TXD;
356 adapter->num_rx_desc = EM_MAX_RXD;
357 adapter->hw.autoneg = DO_AUTO_NEG;
358 adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
359 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
360 adapter->hw.tbi_compatibility_en = TRUE;
361 adapter->rx_buffer_len = EM_RXBUFFER_2048;
364 * These parameters control the automatic generation(Tx) and
365 * response(Rx) to Ethernet PAUSE frames.
367 adapter->hw.fc_high_water = FC_DEFAULT_HI_THRESH;
368 adapter->hw.fc_low_water = FC_DEFAULT_LO_THRESH;
369 adapter->hw.fc_pause_time = FC_DEFAULT_TX_TIMER;
370 adapter->hw.fc_send_xon = TRUE;
371 adapter->hw.fc = em_fc_full;
373 adapter->hw.phy_init_script = 1;
374 adapter->hw.phy_reset_disable = FALSE;
376 #ifndef EM_MASTER_SLAVE
377 adapter->hw.master_slave = em_ms_hw_default;
379 adapter->hw.master_slave = EM_MASTER_SLAVE;
383 * Set the max frame size assuming standard ethernet
386 adapter->hw.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
388 adapter->hw.min_frame_size =
389 MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
392 * This controls when hardware reports transmit completion
395 adapter->hw.report_tx_early = 1;
/* Map the memory BAR; register access goes through the bus handle. */
398 adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
400 if (!(adapter->res_memory)) {
401 device_printf(dev, "Unable to allocate bus resource: memory\n");
405 adapter->osdep.mem_bus_space_tag =
406 rman_get_bustag(adapter->res_memory);
407 adapter->osdep.mem_bus_space_handle =
408 rman_get_bushandle(adapter->res_memory);
409 adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
/* Newer MACs also need the I/O BAR; scan config space for it. */
411 if (adapter->hw.mac_type > em_82543) {
412 /* Figure our where our IO BAR is ? */
414 for (i = 0; i < 5; i++) {
415 val = pci_read_config(dev, rid, 4);
/* Bit 0 set in a BAR means it is an I/O-space BAR. */
416 if (val & 0x00000001) {
417 adapter->io_rid = rid;
423 adapter->res_ioport = bus_alloc_resource_any(dev,
424 SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
425 if (!(adapter->res_ioport)) {
426 device_printf(dev, "Unable to allocate bus resource: ioport\n");
431 adapter->hw.reg_io_tag = rman_get_bustag(adapter->res_ioport);
432 adapter->hw.reg_io_handle = rman_get_bushandle(adapter->res_ioport);
436 adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ,
437 &rid, RF_SHAREABLE | RF_ACTIVE);
438 if (!(adapter->res_interrupt)) {
439 device_printf(dev, "Unable to allocate bus resource: interrupt\n");
444 adapter->hw.back = &adapter->osdep;
446 /* Initialize eeprom parameters */
447 em_init_eeprom_params(&adapter->hw);
449 tsize = adapter->num_tx_desc * sizeof(struct em_tx_desc);
451 /* Allocate Transmit Descriptor ring */
452 if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_WAITOK)) {
453 device_printf(dev, "Unable to allocate TxDescriptor memory\n");
457 adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;
459 rsize = adapter->num_rx_desc * sizeof(struct em_rx_desc);
461 /* Allocate Receive Descriptor ring */
462 if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_WAITOK)) {
463 device_printf(dev, "Unable to allocate rx_desc memory\n");
467 adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;
469 /* Initialize the hardware */
470 if (em_hardware_init(adapter)) {
471 device_printf(dev, "Unable to initialize the hardware\n");
476 /* Copy the permanent MAC address out of the EEPROM */
477 if (em_read_mac_addr(&adapter->hw) < 0) {
478 device_printf(dev, "EEPROM read error while reading mac address\n");
483 if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
484 device_printf(dev, "Invalid mac address\n");
489 /* Setup OS specific network interface */
490 em_setup_interface(dev, adapter);
492 /* Initialize statistics */
493 em_clear_hw_cntrs(&adapter->hw);
494 em_update_stats_counters(adapter);
495 adapter->hw.get_link_status = 1;
496 em_check_for_link(&adapter->hw);
498 /* Print the link status */
499 if (adapter->link_active == 1) {
500 em_get_speed_and_duplex(&adapter->hw, &adapter->link_speed,
501 &adapter->link_duplex);
502 device_printf(dev, "Speed: %d Mbps, Duplex: %s\n",
504 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
506 device_printf(dev, "Speed: N/A, Duplex:N/A\n");
508 /* Identify 82544 on PCIX */
509 em_get_bus_info(&adapter->hw);
510 if (adapter->hw.bus_type == em_bus_type_pcix &&
511 adapter->hw.mac_type == em_82544)
512 adapter->pcix_82544 = TRUE;
514 adapter->pcix_82544 = FALSE;
/* Hook the interrupt last so em_intr never sees a half-set-up softc. */
516 error = bus_setup_intr(dev, adapter->res_interrupt, INTR_TYPE_NET,
517 (void (*)(void *)) em_intr, adapter,
518 &adapter->int_handler_tag);
520 device_printf(dev, "Error registering interrupt handler!\n");
521 ether_ifdetach(&adapter->interface_data.ac_if);
525 INIT_DEBUGOUT("em_attach: end");
533 /*********************************************************************
534 * Device removal routine
536 * The detach entry point is called when the driver is being removed.
537 * This routine stops the adapter and deallocates all the resources
538 * that were allocated for driver operation.
540 * return 0 on success, positive on failure
541 *********************************************************************/
/*
 * em_detach: device detach entry point.  Stops the adapter, detaches
 * the network interface, and releases every resource acquired in
 * em_attach (IRQ, memory BAR, I/O BAR, TX/RX descriptor rings,
 * sysctl context), mirroring the attach order in reverse.
 *
 * NOTE(review): lines elided in this listing (em_stop call, closing
 * braces, final return); restore from pristine source.
 */
544 em_detach(device_t dev)
546 struct adapter * adapter = device_get_softc(dev);
549 INIT_DEBUGOUT("em_detach: begin");
/* Flag detach-in-progress so em_ioctl refuses further requests. */
552 adapter->in_detach = 1;
554 if (device_is_attached(dev)) {
556 em_phy_hw_reset(&adapter->hw);
557 ether_ifdetach(&adapter->interface_data.ac_if);
559 bus_generic_detach(dev);
561 if (adapter->res_interrupt != NULL) {
562 bus_teardown_intr(dev, adapter->res_interrupt,
563 adapter->int_handler_tag);
564 bus_release_resource(dev, SYS_RES_IRQ, 0,
565 adapter->res_interrupt);
567 if (adapter->res_memory != NULL) {
568 bus_release_resource(dev, SYS_RES_MEMORY, EM_MMBA,
569 adapter->res_memory);
572 if (adapter->res_ioport != NULL) {
573 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
574 adapter->res_ioport);
577 /* Free Transmit Descriptor ring */
578 if (adapter->tx_desc_base != NULL) {
579 em_dma_free(adapter, &adapter->txdma);
580 adapter->tx_desc_base = NULL;
583 /* Free Receive Descriptor ring */
584 if (adapter->rx_desc_base != NULL) {
585 em_dma_free(adapter, &adapter->rxdma);
586 adapter->rx_desc_base = NULL;
589 adapter->sysctl_tree = NULL;
590 sysctl_ctx_free(&adapter->sysctl_ctx);
596 /*********************************************************************
598 * Shutdown entry point
600 **********************************************************************/
/*
 * em_shutdown: system-shutdown entry point.
 * NOTE(review): the body (presumably an em_stop call and return) is
 * elided in this listing — confirm against pristine source.
 */
603 em_shutdown(device_t dev)
605 struct adapter *adapter = device_get_softc(dev);
610 /*********************************************************************
611 * Transmit entry point
613 * em_start is called by the stack to initiate a transmit.
614 * The driver will remain in this routine as long as there are
615 * packets to transmit and transmit resources are available.
616 * In case resources are not available stack is notified and
617 * the packet is requeued.
618 **********************************************************************/
/*
 * em_start: if_start handler.  Dequeues packets from the interface
 * send queue and hands them to em_encap() until the queue drains or
 * descriptors run out; on encap failure the packet is re-queued and
 * IFF_OACTIVE is set so the stack stops feeding us.
 *
 * NOTE(review): braces/continue/break lines are elided in this
 * listing; the loop structure is incomplete as shown.
 */
621 em_start(struct ifnet *ifp)
625 struct adapter *adapter = ifp->if_softc;
/* No link: nothing can be transmitted (elided return follows). */
627 if (!adapter->link_active)
631 while (ifp->if_snd.ifq_head != NULL) {
632 IF_DEQUEUE(&ifp->if_snd, m_head);
637 if (em_encap(adapter, m_head)) {
638 ifp->if_flags |= IFF_OACTIVE;
/* Put the packet back at the head so ordering is preserved. */
639 IF_PREPEND(&ifp->if_snd, m_head);
643 /* Send a copy of the frame to the BPF listener */
644 BPF_MTAP(ifp, m_head);
646 /* Set timeout in case hardware has problems transmitting */
647 ifp->if_timer = EM_TX_TIMEOUT;
652 /*********************************************************************
655 * em_ioctl is called when the user wants to configure the
658 * return 0 on success, positive on failure
659 **********************************************************************/
/*
 * em_ioctl: interface ioctl handler.  Dispatches SIOCxIFADDR, MTU,
 * flags, multicast add/del, media, and capability requests; returns
 * 0 on success, errno on failure.
 *
 * NOTE(review): the switch/case labels, braces and returns are
 * elided in this listing — only the case bodies survive.  Restore
 * from pristine source before building.
 */
662 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
664 int s, mask, error = 0;
665 struct ifreq *ifr = (struct ifreq *) data;
666 struct adapter *adapter = ifp->if_softc;
/* Refuse ioctls while em_detach is tearing the device down. */
670 if (adapter->in_detach)
676 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
677 ether_ioctl(ifp, command, data);
680 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
681 if (ifr->ifr_mtu > MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
684 ifp->if_mtu = ifr->ifr_mtu;
685 adapter->hw.max_frame_size =
686 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
691 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
692 if (ifp->if_flags & IFF_UP) {
693 if (!(ifp->if_flags & IFF_RUNNING))
695 em_disable_promisc(adapter);
696 em_set_promisc(adapter);
698 if (ifp->if_flags & IFF_RUNNING)
704 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
705 if (ifp->if_flags & IFF_RUNNING) {
/* Interrupts must be off while the multicast table is rewritten. */
706 em_disable_intr(adapter);
707 em_set_multi(adapter);
/* 82542 rev2.0 needs a receive-unit re-init after multi changes. */
708 if (adapter->hw.mac_type == em_82542_rev2_0)
709 em_initialize_receive_unit(adapter);
710 #ifdef DEVICE_POLLING
711 if (!(ifp->if_flags & IFF_POLLING))
713 em_enable_intr(adapter);
718 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
719 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
722 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
/* Toggle only the capability bits the caller asked to change. */
723 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
724 if (mask & IFCAP_HWCSUM) {
725 if (IFCAP_HWCSUM & ifp->if_capenable)
726 ifp->if_capenable &= ~IFCAP_HWCSUM;
728 ifp->if_capenable |= IFCAP_HWCSUM;
729 if (ifp->if_flags & IFF_RUNNING)
734 IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)\n", (int)command);
743 /*********************************************************************
744 * Watchdog entry point
746 * This routine is called whenever hardware quits transmitting.
748 **********************************************************************/
/*
 * em_watchdog: if_watchdog handler, invoked when if_timer expires
 * because the hardware stopped transmitting.  If the stall is due to
 * flow-control pause frames (STATUS.TXOFF set) the timer is simply
 * re-armed; otherwise the interface is marked down for a reset.
 *
 * NOTE(review): elided lines (return, em_init call, watchdog counter
 * increment) are missing from this listing.
 */
751 em_watchdog(struct ifnet *ifp)
753 struct adapter * adapter;
754 adapter = ifp->if_softc;
756 /* If we are in this routine because of pause frames, then
757 * don't reset the hardware.
759 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
760 ifp->if_timer = EM_TX_TIMEOUT;
764 if (em_check_for_link(&adapter->hw))
765 if_printf(ifp, "watchdog timeout -- resetting\n");
/* Clearing IFF_RUNNING forces a full re-init on the next up. */
767 ifp->if_flags &= ~IFF_RUNNING;
774 /*********************************************************************
777 * This routine is used in two ways. It is used by the stack as
778 * init entry point in network interface structure. It is also used
779 * by the driver as a hw/sw initialization routine to get to a
782 * return 0 on success, positive on failure
783 **********************************************************************/
/*
 * em_init body (the "em_init(void *arg)" signature line is elided in
 * this listing).  Hw/sw initialization: refreshes the MAC address
 * (LAA support), resets the hardware, sets up TX/RX structures and
 * units, restores multicast/promiscuous state, arms the local timer,
 * and enables interrupts unless DEVICE_POLLING is active.
 */
789 struct adapter *adapter = arg;
790 struct ifnet *ifp = &adapter->interface_data.ac_if;
792 INIT_DEBUGOUT("em_init: begin");
798 /* Get the latest mac address, User can use a LAA */
799 bcopy(adapter->interface_data.ac_enaddr, adapter->hw.mac_addr,
802 /* Initialize the hardware */
803 if (em_hardware_init(adapter)) {
804 if_printf(ifp, "Unable to initialize the hardware\n");
809 em_enable_vlans(adapter);
811 /* Prepare transmit descriptors and buffers */
812 if (em_setup_transmit_structures(adapter)) {
813 if_printf(ifp, "Could not setup transmit structures\n");
818 em_initialize_transmit_unit(adapter);
820 /* Setup Multicast table */
821 em_set_multi(adapter);
823 /* Prepare receive descriptors and buffers */
824 if (em_setup_receive_structures(adapter)) {
825 if_printf(ifp, "Could not setup receive structures\n");
830 em_initialize_receive_unit(adapter);
832 /* Don't lose promiscuous settings */
833 em_set_promisc(adapter);
835 ifp->if_flags |= IFF_RUNNING;
836 ifp->if_flags &= ~IFF_OACTIVE;
/* HW checksum offload only exists on 82543 and newer MACs. */
838 if (adapter->hw.mac_type >= em_82543) {
839 if (ifp->if_capenable & IFCAP_TXCSUM)
840 ifp->if_hwassist = EM_CHECKSUM_FEATURES;
842 ifp->if_hwassist = 0;
845 callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
846 em_clear_hw_cntrs(&adapter->hw);
847 #ifdef DEVICE_POLLING
849 * Only enable interrupts if we are not polling, make sure
850 * they are off otherwise.
852 if (ifp->if_flags & IFF_POLLING)
853 em_disable_intr(adapter);
855 #endif /* DEVICE_POLLING */
856 em_enable_intr(adapter);
858 /* Don't reset the phy next time init gets called */
859 adapter->hw.phy_reset_disable = TRUE;
864 #ifdef DEVICE_POLLING
865 static poll_handler_t em_poll;
/*
 * em_poll: DEVICE_POLLING handler.  On POLL_DEREGISTER re-enables
 * interrupts; on POLL_AND_CHECK_STATUS additionally reads ICR and
 * refreshes link state; in all active cases processes up to `count`
 * receive descriptors and reaps completed transmits.
 *
 * NOTE(review): return statements, braces and the trailing
 * em_start(ifp) call are elided in this listing.
 */
868 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
870 struct adapter *adapter = ifp->if_softc;
873 if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
874 em_enable_intr(adapter);
877 if (cmd == POLL_AND_CHECK_STATUS) {
878 reg_icr = E1000_READ_REG(&adapter->hw, ICR);
/* Link change / sequence error: re-evaluate link, restart timer. */
879 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
880 callout_stop(&adapter->timer);
881 adapter->hw.get_link_status = 1;
882 em_check_for_link(&adapter->hw);
883 em_print_link_status(adapter);
884 callout_reset(&adapter->timer, 2*hz, em_local_timer,
888 if (ifp->if_flags & IFF_RUNNING) {
889 em_process_receive_interrupts(adapter, count);
890 em_clean_transmit_interrupts(adapter);
/* Kick the transmitter if packets queued up while we polled. */
893 if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
896 #endif /* DEVICE_POLLING */
898 /*********************************************************************
900 * Interrupt Service routine
902 **********************************************************************/
/*
 * em_intr body (the "static void em_intr(void *arg)" signature line
 * is elided in this listing).  Interrupt service routine: hands off
 * to polling mode when DEVICE_POLLING registers us, otherwise reads
 * ICR, handles link-status changes, and loops up to EM_MAX_INTR
 * times processing RX and reaping TX.
 */
906 uint32_t loop_cnt = EM_MAX_INTR;
909 struct adapter *adapter = arg;
911 ifp = &adapter->interface_data.ac_if;
913 #ifdef DEVICE_POLLING
/* Already polling: this interrupt is spurious (elided return). */
914 if (ifp->if_flags & IFF_POLLING)
917 if (ether_poll_register(em_poll, ifp)) {
918 em_disable_intr(adapter);
922 #endif /* DEVICE_POLLING */
/* Reading ICR also acknowledges/clears the pending causes. */
924 reg_icr = E1000_READ_REG(&adapter->hw, ICR);
928 /* Link status change */
929 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
930 callout_stop(&adapter->timer);
931 adapter->hw.get_link_status = 1;
932 em_check_for_link(&adapter->hw);
933 em_print_link_status(adapter);
934 callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
/* Bounded work loop; -1 means "no RX descriptor limit" here. */
937 while (loop_cnt > 0) {
938 if (ifp->if_flags & IFF_RUNNING) {
939 em_process_receive_interrupts(adapter, -1);
940 em_clean_transmit_interrupts(adapter);
945 if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
949 /*********************************************************************
951 * Media Ioctl callback
953 * This routine is called whenever the user queries the status of
954 * the interface using ifconfig.
956 **********************************************************************/
/*
 * em_media_status: ifmedia status callback (ifconfig queries).
 * Refreshes link state from the STATUS register, then reports media
 * validity, activity, speed and duplex into *ifmr.
 *
 * NOTE(review): case labels of the speed switch and several braces/
 * returns are elided in this listing.
 */
958 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
960 struct adapter * adapter = ifp->if_softc;
962 INIT_DEBUGOUT("em_media_status: begin");
964 em_check_for_link(&adapter->hw);
/* STATUS.LU = link up; cache speed/duplex on an up-transition. */
965 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
966 if (adapter->link_active == 0) {
967 em_get_speed_and_duplex(&adapter->hw,
968 &adapter->link_speed,
969 &adapter->link_duplex);
970 adapter->link_active = 1;
/* Down-transition: clear the cached speed/duplex. */
973 if (adapter->link_active == 1) {
974 adapter->link_speed = 0;
975 adapter->link_duplex = 0;
976 adapter->link_active = 0;
980 ifmr->ifm_status = IFM_AVALID;
981 ifmr->ifm_active = IFM_ETHER;
983 if (!adapter->link_active)
986 ifmr->ifm_status |= IFM_ACTIVE;
/* Fiber links are fixed at 1000SX full-duplex. */
988 if (adapter->hw.media_type == em_media_type_fiber) {
989 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
991 switch (adapter->link_speed) {
993 ifmr->ifm_active |= IFM_10_T;
996 ifmr->ifm_active |= IFM_100_TX;
999 ifmr->ifm_active |= IFM_1000_TX;
1002 if (adapter->link_duplex == FULL_DUPLEX)
1003 ifmr->ifm_active |= IFM_FDX;
1005 ifmr->ifm_active |= IFM_HDX;
1009 /*********************************************************************
1011 * Media Ioctl callback
1013 * This routine is called when the user changes speed/duplex using
1014 * media/mediopt option with ifconfig.
1016 **********************************************************************/
/*
 * em_media_change: ifmedia change callback (ifconfig media/mediaopt).
 * Translates the requested IFM subtype into autoneg or forced
 * speed/duplex settings on the shared hw struct; the PHY is reset on
 * the next init because phy_reset_disable is cleared at the end.
 *
 * NOTE(review): the case labels (IFM_AUTO, IFM_1000_TX, IFM_100_TX,
 * IFM_10_T), braces and returns are elided in this listing.
 */
1018 em_media_change(struct ifnet *ifp)
1020 struct adapter * adapter = ifp->if_softc;
1021 struct ifmedia *ifm = &adapter->media;
1023 INIT_DEBUGOUT("em_media_change: begin");
1025 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1028 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1030 adapter->hw.autoneg = DO_AUTO_NEG;
1031 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
/* Gigabit: autoneg restricted to advertising 1000 full only. */
1035 adapter->hw.autoneg = DO_AUTO_NEG;
1036 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
/* 100 Mbit: force speed, pick duplex from the FDX media flag. */
1039 adapter->hw.autoneg = FALSE;
1040 adapter->hw.autoneg_advertised = 0;
1041 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1042 adapter->hw.forced_speed_duplex = em_100_full;
1044 adapter->hw.forced_speed_duplex = em_100_half;
/* 10 Mbit: same scheme as 100 Mbit. */
1047 adapter->hw.autoneg = FALSE;
1048 adapter->hw.autoneg_advertised = 0;
1049 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1050 adapter->hw.forced_speed_duplex = em_10_full;
1052 adapter->hw.forced_speed_duplex = em_10_half;
1055 if_printf(ifp, "Unsupported media type\n");
1058 * As the speed/duplex settings may have changed we need to
1061 adapter->hw.phy_reset_disable = FALSE;
/*
 * em_tx_cb: bus_dmamap_load_mbuf callback for transmit.  Copies the
 * DMA segment list produced by the mapping into the caller's
 * struct em_q (arg) for use by em_encap.
 *
 * NOTE(review): the error-parameter line, the nsegs assignment and
 * the closing brace are elided in this listing.
 */
1069 em_tx_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize,
1072 struct em_q *q = arg;
1076 KASSERT(nsegs <= EM_MAX_SCATTER,
1077 ("Too many DMA segments returned when mapping tx packet"));
1079 bcopy(seg, q->segs, nsegs * sizeof(seg[0]));
1082 #define EM_FIFO_HDR 0x10
1083 #define EM_82547_PKT_THRESH 0x3e0
1084 #define EM_82547_TX_FIFO_SIZE 0x2800
1085 #define EM_82547_TX_FIFO_BEGIN 0xf00
1086 /*********************************************************************
1088  *  This routine maps the mbufs to tx descriptors.
1090  *  return 0 on success, positive on failure
1091  **********************************************************************/
1093 em_encap(struct adapter *adapter, struct mbuf *m_head)
1096 uint32_t txd_lower, txd_used = 0, txd_saved = 0;
1100 /* For 82544 Workaround */
1101 DESC_ARRAY desc_array;
1102 uint32_t array_elements;
/* Pre-5.x FreeBSD and DragonFly carry VLAN info via a fake rcvif (ifv);
 * newer FreeBSD uses an m_tag (mtag) — selected again below. */
1105 #if defined(__DragonFly__) || __FreeBSD_version < 500000
1106 struct ifvlan *ifv = NULL;
1111 struct em_buffer *tx_buffer = NULL;
1112 struct em_tx_desc *current_tx_desc = NULL;
1113 struct ifnet *ifp = &adapter->interface_data.ac_if;
1116  * Force a cleanup if number of TX descriptors
1117  * available hits the threshold
1119 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1120 em_clean_transmit_interrupts(adapter);
/* Still starved after cleanup: count it and bail (return elided here). */
1121 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1122 adapter->no_tx_desc_avail1++;
1127  * Map the packet for DMA.
/* A fresh map is created per packet; on failure count and give up. */
1129 if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &q.map)) {
1130 adapter->no_tx_map_avail++;
1133 error = bus_dmamap_load_mbuf(adapter->txtag, q.map, m_head, em_tx_cb,
1134 &q, BUS_DMA_NOWAIT);
1136 adapter->no_tx_dma_setup++;
1137 bus_dmamap_destroy(adapter->txtag, q.map);
1140 KASSERT(q.nsegs != 0, ("em_encap: empty packet"));
/* Not enough free descriptors for all DMA segments: undo the mapping. */
1142 if (q.nsegs > adapter->num_tx_desc_avail) {
1143 adapter->no_tx_desc_avail2++;
1144 bus_dmamap_unload(adapter->txtag, q.map);
1145 bus_dmamap_destroy(adapter->txtag, q.map);
/* Hardware checksum offload requested: set up a context descriptor
 * and get the POPTS/DEXT bits for the data descriptors. */
1149 if (ifp->if_hwassist > 0) {
1150 em_transmit_checksum_setup(adapter, m_head,
1151 &txd_upper, &txd_lower);
1154 txd_upper = txd_lower = 0;
1156 /* Find out if we are in vlan mode */
1157 #if defined(__DragonFly__) || __FreeBSD_version < 500000
1158 if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
1159 m_head->m_pkthdr.rcvif != NULL &&
1160 m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
1161 ifv = m_head->m_pkthdr.rcvif->if_softc;
1163 mtag = VLAN_OUTPUT_TAG(ifp, m_head);
1166 i = adapter->next_avail_tx_desc;
/* Remember the starting slot so the 82544/PCI-X path can roll back. */
1167 if (adapter->pcix_82544) {
1171 for (j = 0; j < q.nsegs; j++) {
1172 /* If adapter is 82544 and on PCIX bus */
1173 if(adapter->pcix_82544) {
1175 address = htole64(q.segs[j].ds_addr);
1177  * Check the Address and Length combination and
1178  * split the data accordingly
/* 82544/PCI-X erratum: a segment may need to be split into several
 * descriptors; em_fill_descriptors() returns how many. */
1180 array_elements = em_fill_descriptors(address,
1181 htole32(q.segs[j].ds_len),
1183 for (counter = 0; counter < array_elements; counter++) {
/* Out of descriptors mid-packet: restore the saved tail and unwind. */
1184 if (txd_used == adapter->num_tx_desc_avail) {
1185 adapter->next_avail_tx_desc = txd_saved;
1186 adapter->no_tx_desc_avail2++;
1187 bus_dmamap_unload(adapter->txtag, q.map);
1188 bus_dmamap_destroy(adapter->txtag, q.map);
1191 tx_buffer = &adapter->tx_buffer_area[i];
1192 current_tx_desc = &adapter->tx_desc_base[i];
1193 current_tx_desc->buffer_addr = htole64(
1194 desc_array.descriptor[counter].address);
1195 current_tx_desc->lower.data = htole32(
1196 (adapter->txd_cmd | txd_lower |
1197 (uint16_t)desc_array.descriptor[counter].length));
1198 current_tx_desc->upper.data = htole32((txd_upper));
/* Ring index wraps at num_tx_desc (wrap assignment elided here). */
1199 if (++i == adapter->num_tx_desc)
1202 tx_buffer->m_head = NULL;
/* Normal (non-82544) path: one descriptor per DMA segment. */
1206 tx_buffer = &adapter->tx_buffer_area[i];
1207 current_tx_desc = &adapter->tx_desc_base[i];
1209 current_tx_desc->buffer_addr = htole64(q.segs[j].ds_addr);
1210 current_tx_desc->lower.data = htole32(
1211 adapter->txd_cmd | txd_lower | q.segs[j].ds_len);
1212 current_tx_desc->upper.data = htole32(txd_upper);
1214 if (++i == adapter->num_tx_desc)
1217 tx_buffer->m_head = NULL;
1221 adapter->next_avail_tx_desc = i;
1222 if (adapter->pcix_82544)
1223 adapter->num_tx_desc_avail -= txd_used;
1225 adapter->num_tx_desc_avail -= q.nsegs;
1227 #if defined(__DragonFly__) || __FreeBSD_version < 500000
1229 /* Set the vlan id */
1230 current_tx_desc->upper.fields.special = htole16(ifv->ifv_tag);
1233 /* Set the vlan id */
1234 current_tx_desc->upper.fields.special = htole16(VLAN_TAG_VALUE(mtag));
1237 /* Tell hardware to add tag */
1238 current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
/* The mbuf is owned by the LAST descriptor's buffer slot; it is freed
 * there after the DD bit is seen by em_clean_transmit_interrupts(). */
1241 tx_buffer->m_head = m_head;
1242 tx_buffer->map = q.map;
1243 bus_dmamap_sync(adapter->txtag, q.map, BUS_DMASYNC_PREWRITE);
1246  * Last Descriptor of Packet needs End Of Packet (EOP)
1248 current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);
1251  * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
1252  * that this frame is available to transmit.
/* 82547 at half duplex must go through the FIFO-hang workaround path
 * instead of writing TDT directly. */
1254 if (adapter->hw.mac_type == em_82547 &&
1255 adapter->link_duplex == HALF_DUPLEX) {
1256 em_82547_move_tail(adapter);
1258 E1000_WRITE_REG(&adapter->hw, TDT, i);
1259 if (adapter->hw.mac_type == em_82547) {
1260 em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
1267 /*********************************************************************
1269  * 82547 workaround to avoid controller hang in half-duplex environment.
1270  * The workaround is to avoid queuing a large packet that would span
1271  * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1272  * in this case. We do that only when FIFO is quiescent.
1274  **********************************************************************/
1276 em_82547_move_tail(void *arg)
1279 struct adapter *adapter = arg;
1282 struct em_tx_desc *tx_desc;
1283 uint16_t length = 0;
/* Walk the descriptors between the hardware tail (TDT) and the
 * software tail, accumulating the pending packet length. */
1287 hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
1288 sw_tdt = adapter->next_avail_tx_desc;
1290 while (hw_tdt != sw_tdt) {
1291 tx_desc = &adapter->tx_desc_base[hw_tdt];
1292 length += tx_desc->lower.flags.length;
1293 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1294 if(++hw_tdt == adapter->num_tx_desc)
/* If the packet would wrap the TX FIFO and the FIFO could not be
 * reset yet, retry from the callout one tick later instead of
 * advancing the tail now. */
1298 if (em_82547_fifo_workaround(adapter, length)) {
1299 adapter->tx_fifo_wrk++;
1300 callout_reset(&adapter->tx_fifo_timer, 1,
1301 em_82547_move_tail, adapter);
/* Safe to hand the frame(s) to hardware: advance TDT and account
 * for the FIFO space consumed. */
1304 E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
1305 em_82547_update_fifo_head(adapter, length);
/*
 * Decide whether a packet of 'len' bytes may be released to the 82547
 * TX FIFO.  At half duplex, if the (header-padded, 16-byte rounded)
 * packet would cross the FIFO wrap point, attempt a FIFO reset first.
 * (NOTE(review): return statements are elided in this excerpt; by the
 * caller's usage, non-zero means "retry later".)
 */
1313 em_82547_fifo_workaround(struct adapter *adapter, int len)
1315 int fifo_space, fifo_pkt_len;
1317 fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1319 if (adapter->link_duplex == HALF_DUPLEX) {
1320 fifo_space = EM_82547_TX_FIFO_SIZE - adapter->tx_fifo_head;
1322 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1323 if (em_82547_tx_fifo_reset(adapter))
/*
 * Advance the software shadow of the 82547 TX FIFO head by the space a
 * 'len'-byte packet consumes (header overhead added, rounded up to the
 * 16-byte FIFO granularity), wrapping at the FIFO size.
 */
1334 em_82547_update_fifo_head(struct adapter *adapter, int len)
1336 int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1338 /* tx_fifo_head is always 16 byte aligned */
1339 adapter->tx_fifo_head += fifo_pkt_len;
1340 if (adapter->tx_fifo_head >= EM_82547_TX_FIFO_SIZE)
1341 adapter->tx_fifo_head -= EM_82547_TX_FIFO_SIZE;
/*
 * Reset the 82547 internal TX FIFO pointers — only allowed when the
 * FIFO is fully quiescent: descriptor ring drained (TDT==TDH), FIFO
 * read/write pointers caught up, and zero packets pending (TDFPC==0).
 * (NOTE(review): return values elided here; caller treats non-zero as
 * "reset done, proceed".)
 */
1345 em_82547_tx_fifo_reset(struct adapter *adapter)
1349 if ( (E1000_READ_REG(&adapter->hw, TDT) ==
1350 E1000_READ_REG(&adapter->hw, TDH)) &&
1351 (E1000_READ_REG(&adapter->hw, TDFT) ==
1352 E1000_READ_REG(&adapter->hw, TDFH)) &&
1353 (E1000_READ_REG(&adapter->hw, TDFTS) ==
1354 E1000_READ_REG(&adapter->hw, TDFHS)) &&
1355 (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {
1357 /* Disable TX unit */
1358 tctl = E1000_READ_REG(&adapter->hw, TCTL);
1359 E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);
1361 /* Reset FIFO pointers */
1362 E1000_WRITE_REG(&adapter->hw, TDFT, EM_82547_TX_FIFO_BEGIN);
1363 E1000_WRITE_REG(&adapter->hw, TDFH, EM_82547_TX_FIFO_BEGIN);
1364 E1000_WRITE_REG(&adapter->hw, TDFTS, EM_82547_TX_FIFO_BEGIN);
1365 E1000_WRITE_REG(&adapter->hw, TDFHS, EM_82547_TX_FIFO_BEGIN);
1367 /* Re-enable TX unit */
1368 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1369 E1000_WRITE_FLUSH(&adapter->hw);
/* Shadow head restarts at 0; count resets for statistics. */
1371 adapter->tx_fifo_head = 0;
1372 adapter->tx_fifo_reset++;
/*
 * Program RCTL promiscuous bits from the interface flags:
 * IFF_PROMISC enables both unicast (UPE) and multicast (MPE)
 * promiscuous modes; IFF_ALLMULTI enables only MPE.
 */
1382 em_set_promisc(struct adapter *adapter)
1385 struct ifnet *ifp = &adapter->interface_data.ac_if;
1387 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1389 if (ifp->if_flags & IFF_PROMISC) {
1390 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1391 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1392 } else if (ifp->if_flags & IFF_ALLMULTI) {
1393 reg_rctl |= E1000_RCTL_MPE;
1394 reg_rctl &= ~E1000_RCTL_UPE;
1395 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
/*
 * Clear both promiscuous bits (unicast UPE and multicast MPE) in RCTL,
 * undoing em_set_promisc().
 */
1400 em_disable_promisc(struct adapter *adapter)
1404 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1406 reg_rctl &= (~E1000_RCTL_UPE);
1407 reg_rctl &= (~E1000_RCTL_MPE);
1408 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1411 /*********************************************************************
1414  *  This routine is called whenever multicast address list is updated.
1416  **********************************************************************/
1419 em_set_multi(struct adapter *adapter)
1421 uint32_t reg_rctl = 0;
1422 uint8_t  mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
1423 struct ifmultiaddr *ifma;
1425 struct ifnet *ifp = &adapter->interface_data.ac_if;
1427 IOCTL_DEBUGOUT("em_set_multi: begin");
/* 82542 rev2.0 erratum: receiver must be held in reset (RCTL_RST) and
 * MWI disabled while the multicast table array is updated. */
1429 if (adapter->hw.mac_type == em_82542_rev2_0) {
1430 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1431 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1432 em_pci_clear_mwi(&adapter->hw);
1433 reg_rctl |= E1000_RCTL_RST;
1434 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
/* Gather up to MAX_NUM_MULTICAST_ADDRESSES link-level addresses into
 * the flat mta[] array expected by em_mc_addr_list_update(). */
1438 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1439 if (ifma->ifma_addr->sa_family != AF_LINK)
1442 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1445 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1446 &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
/* Too many groups to filter exactly: fall back to accepting all
 * multicast (MPE); otherwise program the exact filter table. */
1450 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1451 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1452 reg_rctl |= E1000_RCTL_MPE;
1453 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1455 em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);
/* 82542 rev2.0: take the receiver back out of reset and restore MWI. */
1457 if (adapter->hw.mac_type == em_82542_rev2_0) {
1458 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1459 reg_rctl &= ~E1000_RCTL_RST;
1460 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1462 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1463 em_pci_set_mwi(&adapter->hw);
1467 /*********************************************************************
1470  *  This routine checks for link status and updates statistics.
1472  **********************************************************************/
1475 em_local_timer(void *arg)
1479 struct adapter *adapter = arg;
1480 ifp = &adapter->interface_data.ac_if;
/* Periodic housekeeping: link check, stats, optional debug dump,
 * and the SmartSpeed workaround pass. */
1484 em_check_for_link(&adapter->hw);
1485 em_print_link_status(adapter);
1486 em_update_stats_counters(adapter);
1487 if (em_display_debug_stats && ifp->if_flags & IFF_RUNNING)
1488 em_print_hw_stats(adapter);
1489 em_smartspeed(adapter);
/* Re-arm: this timer fires every 2 seconds. */
1491 callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
/*
 * Report link transitions: on up, fetch speed/duplex, log them and
 * reset the SmartSpeed counter; on down, clear the cached speed/duplex
 * and log the loss.  Only edges (link_active changing) produce output.
 */
1497 em_print_link_status(struct adapter *adapter)
1499 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1500 if (adapter->link_active == 0) {
1501 em_get_speed_and_duplex(&adapter->hw,
1502 &adapter->link_speed,
1503 &adapter->link_duplex);
1504 device_printf(adapter->dev, "Link is up %d Mbps %s\n",
1505 adapter->link_speed,
1506 ((adapter->link_duplex == FULL_DUPLEX) ?
1507 "Full Duplex" : "Half Duplex"));
1508 adapter->link_active = 1;
1509 adapter->smartspeed = 0;
1512 if (adapter->link_active == 1) {
1513 adapter->link_speed = 0;
1514 adapter->link_duplex = 0;
1515 device_printf(adapter->dev, "Link is Down\n");
1516 adapter->link_active = 0;
1521 /*********************************************************************
1523  *  This routine disables all traffic on the adapter by issuing a
1524  *  global reset on the MAC and deallocates TX/RX buffers.
1526  **********************************************************************/
1532 struct adapter * adapter = arg;
1533 ifp = &adapter->interface_data.ac_if;
1535 INIT_DEBUGOUT("em_stop: begin");
/* Quiesce: mask interrupts, reset the MAC, stop both callouts, then
 * release all TX/RX descriptor buffers. */
1536 em_disable_intr(adapter);
1537 em_reset_hw(&adapter->hw);
1538 callout_stop(&adapter->timer);
1539 callout_stop(&adapter->tx_fifo_timer);
1540 em_free_transmit_structures(adapter);
1541 em_free_receive_structures(adapter);
1543 /* Tell the stack that the interface is no longer active */
1544 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1548 /*********************************************************************
1550  *  Determine hardware revision.
1552  **********************************************************************/
1554 em_identify_hardware(struct adapter * adapter)
1556 device_t dev = adapter->dev;
1558 /* Make sure our PCI config space has the necessary stuff set */
/* Some BIOSes leave bus-mastering/memory-space disabled; force both
 * on, since the driver needs MMIO and DMA. */
1559 adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1560 if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1561 (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1562 device_printf(dev, "Memory Access and/or Bus Master bits were not set!\n");
1563 adapter->hw.pci_cmd_word |=
1564 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1565 pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
1568 /* Save off the information about this board */
1569 adapter->hw.vendor_id = pci_get_vendor(dev);
1570 adapter->hw.device_id = pci_get_device(dev);
1571 adapter->hw.revision_id = pci_get_revid(dev);
1572 adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev);
1573 adapter->hw.subsystem_id = pci_get_subdevice(dev);
1575 /* Identify the MAC */
1576 if (em_set_mac_type(&adapter->hw))
1577 device_printf(dev, "Unknown MAC Type\n");
/* 82541/82547 families require the PHY init script during reset. */
1579 if (adapter->hw.mac_type == em_82541 ||
1580 adapter->hw.mac_type == em_82541_rev_2 ||
1581 adapter->hw.mac_type == em_82547 ||
1582 adapter->hw.mac_type == em_82547_rev_2)
1583 adapter->hw.phy_init_script = TRUE;
1586 /*********************************************************************
1588  *  Initialize the hardware to a configuration as specified by the
1589  *  adapter structure. The controller is reset, the EEPROM is
1590  *  verified, the MAC address is set, then the shared initialization
1591  *  routines are called.
1593  **********************************************************************/
1595 em_hardware_init(struct adapter *adapter)
1597 INIT_DEBUGOUT("em_hardware_init: begin");
1598 /* Issue a global reset */
1599 em_reset_hw(&adapter->hw);
1601 /* When hardware is reset, fifo_head is also reset */
1602 adapter->tx_fifo_head = 0;
1604 /* Make sure we have a good EEPROM before we read from it */
1605 if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
1606 device_printf(adapter->dev, "The EEPROM Checksum Is Not Valid\n");
1610 if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
1611 device_printf(adapter->dev, "EEPROM read error while reading part number\n");
1615 if (em_init_hw(&adapter->hw) < 0) {
1616 device_printf(adapter->dev, "Hardware Initialization Failed");
/* Seed the cached link state from the LU (link-up) status bit. */
1620 em_check_for_link(&adapter->hw);
1621 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
1622 adapter->link_active = 1;
1624 adapter->link_active = 0;
1626 if (adapter->link_active) {
1627 em_get_speed_and_duplex(&adapter->hw,
1628 &adapter->link_speed,
1629 &adapter->link_duplex);
1631 adapter->link_speed = 0;
1632 adapter->link_duplex = 0;
1638 /*********************************************************************
1640  *  Setup networking device structure and register an interface.
1642  **********************************************************************/
1644 em_setup_interface(device_t dev, struct adapter *adapter)
1647 INIT_DEBUGOUT("em_setup_interface: begin");
/* Fill in the ifnet callbacks and defaults, then attach to the
 * Ethernet layer with the MAC address read from the EEPROM. */
1649 ifp = &adapter->interface_data.ac_if;
1650 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1651 ifp->if_mtu = ETHERMTU;
1652 ifp->if_baudrate = 1000000000;
1653 ifp->if_init =  em_init;
1654 ifp->if_softc = adapter;
1655 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1656 ifp->if_ioctl = em_ioctl;
1657 ifp->if_start = em_start;
1658 ifp->if_watchdog = em_watchdog;
1659 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;
1661 ether_ifattach(ifp, adapter->hw.mac_addr);
/* 82543 and later support hardware checksumming. */
1663 if (adapter->hw.mac_type >= em_82543) {
1664 ifp->if_capabilities = IFCAP_HWCSUM;
1665 ifp->if_capenable = ifp->if_capabilities;
1669  * Tell the upper layer(s) we support long frames.
1671 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1672 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
1673 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1677  * Specify the media types supported by this adapter and register
1678  * callbacks to update media and link information
1680 ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
/* Fiber adapters only offer 1000-SX; copper gets the full 10/100/1000
 * list.  Both end with autoselect as the default. */
1682 if (adapter->hw.media_type == em_media_type_fiber) {
1683 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
1685 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
1688 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1689 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
1691 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
1693 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
1695 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_TX | IFM_FDX,
1697 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_TX, 0, NULL);
1699 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1700 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1703 /*********************************************************************
1705  *  Workaround for SmartSpeed on 82541 and 82547 controllers
1707  **********************************************************************/
1709 em_smartspeed(struct adapter *adapter)
/* Only relevant while link is down on an IGP PHY autonegotiating
 * 1000BASE-T full duplex; otherwise nothing to do. */
1713 if (adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
1714 !adapter->hw.autoneg ||
1715 !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
1718 if (adapter->smartspeed == 0) {
1720  * If Master/Slave config fault is asserted twice,
1721  * we assume back-to-back.
1723 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1724 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
1726 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1727 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
1728 em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
/* Fault confirmed: drop manual master/slave config and restart
 * autonegotiation to break the deadlock. */
1730 if (phy_tmp & CR_1000T_MS_ENABLE) {
1731 phy_tmp &= ~CR_1000T_MS_ENABLE;
1732 em_write_phy_reg(&adapter->hw,
1733 PHY_1000T_CTRL, phy_tmp);
1734 adapter->smartspeed++;
1735 if (adapter->hw.autoneg &&
1736 !em_phy_setup_autoneg(&adapter->hw) &&
1737 !em_read_phy_reg(&adapter->hw, PHY_CTRL,
1739 phy_tmp |= (MII_CR_AUTO_NEG_EN |
1740 MII_CR_RESTART_AUTO_NEG);
1741 em_write_phy_reg(&adapter->hw,
/* After EM_SMARTSPEED_DOWNSHIFT timer ticks with no link, re-enable
 * master/slave config (possibly a 2/3-pair cable) and renegotiate. */
1747 } else if (adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
1748 /* If still no link, perhaps using 2/3 pair cable */
1749 em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
1750 phy_tmp |= CR_1000T_MS_ENABLE;
1751 em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
1752 if (adapter->hw.autoneg &&
1753 !em_phy_setup_autoneg(&adapter->hw) &&
1754 !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
1755 phy_tmp |= (MII_CR_AUTO_NEG_EN |
1756 MII_CR_RESTART_AUTO_NEG);
1757 em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
1760 /* Restart process after EM_SMARTSPEED_MAX iterations */
1761 if (adapter->smartspeed++ == EM_SMARTSPEED_MAX)
1762 adapter->smartspeed = 0;
1766  * Manage DMA'able memory.
/*
 * Trivial bus_dmamap_load() callback: stores the physical address of
 * the first (and only expected) segment into *arg.
 */
1769 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1773 *(bus_addr_t*) arg = segs->ds_addr;
/*
 * Allocate a page-aligned, contiguous DMA-able region of 'size' bytes:
 * create a dedicated tag, allocate/load the memory, and fill in the
 * em_dma_alloc bookkeeping (vaddr, paddr via em_dmamap_cb, size).
 * On failure, unwinds whatever was created (goto-style cleanup; labels
 * elided in this excerpt) and NULLs the handle fields.
 */
1777 em_dma_malloc(struct adapter *adapter, bus_size_t size,
1778 struct em_dma_alloc *dma, int mapflags)
1781 device_t dev = adapter->dev;
1783 r = bus_dma_tag_create(NULL,                    /* parent */
1784 PAGE_SIZE, 0,            /* alignment, bounds */
1785 BUS_SPACE_MAXADDR,       /* lowaddr */
1786 BUS_SPACE_MAXADDR,       /* highaddr */
1787 NULL, NULL,              /* filter, filterarg */
1790 size,                    /* maxsegsize */
1791 BUS_DMA_ALLOCNOW,        /* flags */
1794 device_printf(dev, "em_dma_malloc: bus_dma_tag_create failed; "
1799 r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
1800 BUS_DMA_NOWAIT, &dma->dma_map);
1802 device_printf(dev, "em_dma_malloc: bus_dmammem_alloc failed; "
1803 "size %llu, error %d\n", (uintmax_t)size, r);
/* Loading resolves the physical address; em_dmamap_cb writes it
 * into dma->dma_paddr (destination argument elided here). */
1807 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1811 mapflags | BUS_DMA_NOWAIT);
1813 device_printf(dev, "em_dma_malloc: bus_dmamap_load failed; "
1818 dma->dma_size = size;
/* Error-unwind path: release in reverse order of acquisition. */
1822 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1824 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1825 bus_dma_tag_destroy(dma->dma_tag);
1827 dma->dma_map = NULL;
1828 dma->dma_tag = NULL;
/*
 * Release a region obtained from em_dma_malloc(): unload the map, free
 * the memory, and destroy the tag — reverse order of acquisition.
 */
1833 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
1835 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1836 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1837 bus_dma_tag_destroy(dma->dma_tag);
1840 /*********************************************************************
1842  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1843  *  the information needed to transmit a packet on the wire.
1845  **********************************************************************/
1847 em_allocate_transmit_structures(struct adapter * adapter)
/* One zeroed em_buffer per TX descriptor; M_NOWAIT, so failure is
 * reported to the caller rather than sleeping. */
1849 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
1850 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
1851 if (adapter->tx_buffer_area == NULL) {
1852 device_printf(adapter->dev, "Unable to allocate tx_buffer memory\n");
1859 /*********************************************************************
1861  *  Allocate and initialize transmit structures.
1863  **********************************************************************/
1865 em_setup_transmit_structures(struct adapter * adapter)
1868  * Setup DMA descriptor areas.
/* TX mbuf tag: up to EM_MAX_SCATTER segments, total MCLBYTES*8 per
 * packet — the tag stored in adapter->txtag (assignment elided). */
1870 if (bus_dma_tag_create(NULL,                    /* parent */
1871 1, 0,                    /* alignment, bounds */
1872 BUS_SPACE_MAXADDR,       /* lowaddr */
1873 BUS_SPACE_MAXADDR,       /* highaddr */
1874 NULL, NULL,              /* filter, filterarg */
1875 MCLBYTES * 8,            /* maxsize */
1876 EM_MAX_SCATTER,          /* nsegments */
1877 MCLBYTES * 8,            /* maxsegsize */
1878 BUS_DMA_ALLOCNOW,        /* flags */
1880 device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
1884 if (em_allocate_transmit_structures(adapter))
/* Start with a clean descriptor ring and reset all ring indices. */
1887 bzero((void *) adapter->tx_desc_base,
1888 (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
1890 adapter->next_avail_tx_desc = 0;
1891 adapter->oldest_used_tx_desc = 0;
1893 /* Set number of descriptors available */
1894 adapter->num_tx_desc_avail = adapter->num_tx_desc;
1896 /* Set checksum context */
1897 adapter->active_checksum_context = OFFLOAD_NONE;
1902 /*********************************************************************
1904  *  Enable transmit unit.
1906  **********************************************************************/
1908 em_initialize_transmit_unit(struct adapter * adapter)
1911 uint32_t reg_tipg = 0;
1914 INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
1916 /* Setup the Base and Length of the Tx Descriptor Ring */
1917 bus_addr = adapter->txdma.dma_paddr;
1918 E1000_WRITE_REG(&adapter->hw, TDBAL, (uint32_t)bus_addr);
1919 E1000_WRITE_REG(&adapter->hw, TDBAH, (uint32_t)(bus_addr >> 32));
1920 E1000_WRITE_REG(&adapter->hw, TDLEN,
1921 adapter->num_tx_desc * sizeof(struct em_tx_desc));
1923 /* Setup the HW Tx Head and Tail descriptor pointers */
1924 E1000_WRITE_REG(&adapter->hw, TDH, 0);
1925 E1000_WRITE_REG(&adapter->hw, TDT, 0);
1927 HW_DEBUGOUT2("Base = %x, Length = %x\n",
1928 E1000_READ_REG(&adapter->hw, TDBAL),
1929 E1000_READ_REG(&adapter->hw, TDLEN));
1931 /* Set the default values for the Tx Inter Packet Gap timer */
/* 82542 revisions use different IPG defaults; fiber vs copper also
 * differ on later MACs. */
1932 switch (adapter->hw.mac_type) {
1933 case em_82542_rev2_0:
1934 case em_82542_rev2_1:
1935 reg_tipg = DEFAULT_82542_TIPG_IPGT;
1936 reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
1937 reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
1940 if (adapter->hw.media_type == em_media_type_fiber)
1941 reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1943 reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1944 reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
1945 reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
1948 E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
1949 E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
/* TADV (absolute interrupt delay) exists only on 82540 and later. */
1950 if (adapter->hw.mac_type >= em_82540)
1951 E1000_WRITE_REG(&adapter->hw, TADV,
1952 adapter->tx_abs_int_delay.value);
1954 /* Program the Transmit Control Register */
1955 reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
1956 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1957 if (adapter->link_duplex == 1)
1958 reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
1960 reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
1961 E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
1963 /* Setup Transmit Descriptor Settings for this adapter */
/* Base command bits for every TX descriptor: insert FCS and report
 * status; add IDE when an interrupt delay is configured. */
1964 adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;
1966 if (adapter->tx_int_delay.value > 0)
1967 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1970 /*********************************************************************
1972  *  Free all transmit related data structures.
1974  **********************************************************************/
1976 em_free_transmit_structures(struct adapter * adapter)
1978 struct em_buffer *tx_buffer;
1981 INIT_DEBUGOUT("free_transmit_structures: begin");
/* For each in-flight buffer: tear down its DMA map before freeing
 * the mbuf chain it points at. */
1983 if (adapter->tx_buffer_area != NULL) {
1984 tx_buffer = adapter->tx_buffer_area;
1985 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
1986 if (tx_buffer->m_head != NULL) {
1987 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1988 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1989 m_freem(tx_buffer->m_head);
1991 tx_buffer->m_head = NULL;
1994 if (adapter->tx_buffer_area != NULL) {
1995 free(adapter->tx_buffer_area, M_DEVBUF);
1996 adapter->tx_buffer_area = NULL;
1998 if (adapter->txtag != NULL) {
1999 bus_dma_tag_destroy(adapter->txtag);
2000 adapter->txtag = NULL;
2004 /**********************************************************************
2006  *  The offload context needs to be set when we transfer the first
2007  *  packet of a particular protocol (TCP/UDP). We change the
2008  *  context only if the protocol type changes.
2010  **********************************************************************/
2012 em_transmit_checksum_setup(struct adapter * adapter,
2014 uint32_t *txd_upper,
2015 uint32_t *txd_lower)
2017 struct em_context_desc *TXD;
2018 struct em_buffer *tx_buffer;
/* Decide TXSM/DEXT bits from the mbuf's csum flags; if the protocol
 * matches the currently-programmed context, the early return (elided)
 * skips emitting a new context descriptor. */
2021 if (mp->m_pkthdr.csum_flags) {
2022 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2023 *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2024 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2025 if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
2028 adapter->active_checksum_context = OFFLOAD_TCP_IP;
2029 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2030 *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2031 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2032 if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
2035 adapter->active_checksum_context = OFFLOAD_UDP_IP;
2047 /* If we reach this point, the checksum offload context
2048  * needs to be reset.
/* Consume one ring slot for a context descriptor describing the IP
 * header layout (assumes a plain Ethernet + IPv4 header, no options). */
2050 curr_txd = adapter->next_avail_tx_desc;
2051 tx_buffer = &adapter->tx_buffer_area[curr_txd];
2052 TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
2054 TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2055 TXD->lower_setup.ip_fields.ipcso =
2056 ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2057 TXD->lower_setup.ip_fields.ipcse =
2058 htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2060 TXD->upper_setup.tcp_fields.tucss =
2061 ETHER_HDR_LEN + sizeof(struct ip);
2062 TXD->upper_setup.tcp_fields.tucse = htole16(0);
/* Checksum destination offset differs between TCP and UDP headers. */
2064 if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
2065 TXD->upper_setup.tcp_fields.tucso =
2066 ETHER_HDR_LEN + sizeof(struct ip) +
2067 offsetof(struct tcphdr, th_sum);
2068 } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
2069 TXD->upper_setup.tcp_fields.tucso =
2070 ETHER_HDR_LEN + sizeof(struct ip) +
2071 offsetof(struct udphdr, uh_sum);
2074 TXD->tcp_seg_setup.data = htole32(0);
2075 TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
2077 tx_buffer->m_head = NULL;
2079 if (++curr_txd == adapter->num_tx_desc)
2082 adapter->num_tx_desc_avail--;
2083 adapter->next_avail_tx_desc = curr_txd;
2086 /**********************************************************************
2088  *  Examine each tx_buffer in the used queue. If the hardware is done
2089  *  processing the packet then free associated resources. The
2090  *  tx_buffer is put back on the free queue.
2092  **********************************************************************/
2094 em_clean_transmit_interrupts(struct adapter *adapter)
2098 struct em_buffer *tx_buffer;
2099 struct em_tx_desc *tx_desc;
2100 struct ifnet *ifp = &adapter->interface_data.ac_if;
/* Nothing in flight: the ring is already fully available. */
2102 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2107 adapter->clean_tx_interrupts++;
2109 num_avail = adapter->num_tx_desc_avail;
2110 i = adapter->oldest_used_tx_desc;
2112 tx_buffer = &adapter->tx_buffer_area[i];
2113 tx_desc = &adapter->tx_desc_base[i];
/* Reclaim descriptors the hardware has marked done (DD status bit),
 * starting at the oldest used slot. */
2115 while(tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2116 tx_desc->upper.data = 0;
/* A non-NULL m_head marks the packet's last descriptor: sync,
 * unload and destroy its map, then free the mbuf chain. */
2119 if (tx_buffer->m_head) {
2121 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2122 BUS_DMASYNC_POSTWRITE);
2123 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2124 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2126 m_freem(tx_buffer->m_head);
2127 tx_buffer->m_head = NULL;
2130 if (++i == adapter->num_tx_desc)
2133 tx_buffer = &adapter->tx_buffer_area[i];
2134 tx_desc = &adapter->tx_desc_base[i];
2137 adapter->oldest_used_tx_desc = i;
2140  * If we have enough room, clear IFF_OACTIVE to tell the stack
2141  * that it is OK to send packets.
2142  * If there are no pending descriptors, clear the timeout. Otherwise,
2143  * if some descriptors have been freed, restart the timeout.
2145 if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
2146 ifp->if_flags &= ~IFF_OACTIVE;
2147 if (num_avail == adapter->num_tx_desc)
2149 else if (num_avail == adapter->num_tx_desc_avail)
2150 ifp->if_timer = EM_TX_TIMEOUT;
2152 adapter->num_tx_desc_avail = num_avail;
2156 /*********************************************************************
2158  *  Get a buffer from system mbuf buffer pool.
2160  **********************************************************************/
2162 em_get_buf(int i, struct adapter *adapter, struct mbuf *nmp)
2164 struct mbuf *mp = nmp;
2165 struct em_buffer *rx_buffer;
2170 ifp = &adapter->interface_data.ac_if;
/* Caller may pass NULL to have a fresh cluster mbuf allocated; a
 * recycled mbuf is reset to a full, cluster-backed payload instead. */
2173 mp = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
2175 adapter->mbuf_cluster_failed++;
2178 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2180 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2181 mp->m_data = mp->m_ext.ext_buf;
/* For standard MTU, shift the payload so the IP header lands on a
 * natural alignment boundary. */
2184 if (ifp->if_mtu <= ETHERMTU)
2185 m_adj(mp, ETHER_ALIGN);
2187 rx_buffer = &adapter->rx_buffer_area[i];
2190  * Using memory from the mbuf cluster pool, invoke the
2191  * bus_dma machinery to arrange the memory mapping.
2193 error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
2194 mtod(mp, void *), mp->m_len,
2195 em_dmamap_cb, &paddr, 0);
/* On success, attach the mbuf to slot i and point the RX descriptor
 * at the freshly-mapped physical address. */
2200 rx_buffer->m_head = mp;
2201 adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
2202 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
2207 /*********************************************************************
2209  *  Allocate memory for rx_buffer structures. Since we use one
2210  *  rx_buffer per received packet, the maximum number of rx_buffer's
2211  *  that we'll need is equal to the number of receive descriptors
2212  *  that we've allocated.
2214  **********************************************************************/
2216 em_allocate_receive_structures(struct adapter *adapter)
2219 struct em_buffer *rx_buffer;
2221 size = adapter->num_rx_desc * sizeof(struct em_buffer);
2222 adapter->rx_buffer_area = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
/* RX tag: single segment of at most one mbuf cluster per packet. */
2224 error = bus_dma_tag_create(NULL,                /* parent */
2225 1, 0,                    /* alignment, bounds */
2226 BUS_SPACE_MAXADDR,       /* lowaddr */
2227 BUS_SPACE_MAXADDR,       /* highaddr */
2228 NULL, NULL,              /* filter, filterarg */
2229 MCLBYTES,                /* maxsize */
2231 MCLBYTES,                /* maxsegsize */
2232 BUS_DMA_ALLOCNOW,        /* flags */
2235 device_printf(adapter->dev, "em_allocate_receive_structures: "
2236 "bus_dma_tag_create failed; error %u\n", error);
/* One DMA map per RX slot, created up front. */
2240 rx_buffer = adapter->rx_buffer_area;
2241 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2242 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2245 device_printf(adapter->dev,
2246 "em_allocate_receive_structures: "
2247 "bus_dmamap_create failed; error %u\n",
/* Pre-fill every slot with a mapped mbuf cluster. */
2253 for (i = 0; i < adapter->num_rx_desc; i++) {
2254 error = em_get_buf(i, adapter, NULL);
2256 adapter->rx_buffer_area[i].m_head = NULL;
2257 adapter->rx_desc_base[i].buffer_addr = 0;
/* Error-unwind path: drop the tag and the buffer array. */
2265 bus_dma_tag_destroy(adapter->rxtag);
2267 adapter->rxtag = NULL;
2268 free(adapter->rx_buffer_area, M_DEVBUF);
2269 adapter->rx_buffer_area = NULL;
2273 /*********************************************************************
2275  *  Allocate and initialize receive structures.
2277  **********************************************************************/
2279 em_setup_receive_structures(struct adapter *adapter)
/* Clear the RX descriptor ring before populating it with buffers. */
2281 bzero((void *) adapter->rx_desc_base,
2282 (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2284 if (em_allocate_receive_structures(adapter))
2287 /* Setup our descriptor pointers */
2288 adapter->next_rx_desc_to_check = 0;
2292 /*********************************************************************
2294  *  Enable receive unit.
2296  **********************************************************************/
2298 em_initialize_receive_unit(struct adapter *adapter)
2301 uint32_t reg_rxcsum;
2305 INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2307 ifp = &adapter->interface_data.ac_if;
2309 /* Make sure receives are disabled while setting up the descriptor ring */
2310 E1000_WRITE_REG(&adapter->hw, RCTL, 0);
2312 /* Set the Receive Delay Timer Register */
2313 E1000_WRITE_REG(&adapter->hw, RDTR,
2314 adapter->rx_int_delay.value | E1000_RDT_FPDB);
/* 82540+: absolute RX interrupt delay and optional interrupt
 * throttling (ITR counts in 256 ns units; 0 disables it). */
2316 if(adapter->hw.mac_type >= em_82540) {
2317 E1000_WRITE_REG(&adapter->hw, RADV,
2318 adapter->rx_abs_int_delay.value);
2320 /* Set the interrupt throttling rate in 256ns increments */
2321 if (em_int_throttle_ceil) {
2322 E1000_WRITE_REG(&adapter->hw, ITR,
2323 1000000000 / 256 / em_int_throttle_ceil);
2325 E1000_WRITE_REG(&adapter->hw, ITR, 0);
2329 /* Setup the Base and Length of the Rx Descriptor Ring */
2330 bus_addr = adapter->rxdma.dma_paddr;
2331 E1000_WRITE_REG(&adapter->hw, RDBAL, (uint32_t)bus_addr);
2332 E1000_WRITE_REG(&adapter->hw, RDBAH, (uint32_t)(bus_addr >> 32));
2333 E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
2334 sizeof(struct em_rx_desc));
2336 /* Setup the HW Rx Head and Tail Descriptor Pointers */
2337 E1000_WRITE_REG(&adapter->hw, RDH, 0);
2338 E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
2340 /* Setup the Receive Control Register */
2341 reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2342 E1000_RCTL_RDMTS_HALF |
2343 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
/* TBI compatibility requires storing bad packets (SBP) so the driver
 * can fix up carrier-extension errors in software. */
2345 if (adapter->hw.tbi_compatibility_on == TRUE)
2346 reg_rctl |= E1000_RCTL_SBP;
/* Buffer-size bits; sizes above 2048 need the BSEX multiplier and
 * long-packet enable.  (Break statements elided in this excerpt.) */
2348 switch (adapter->rx_buffer_len) {
2350 case EM_RXBUFFER_2048:
2351 reg_rctl |= E1000_RCTL_SZ_2048;
2353 case EM_RXBUFFER_4096:
2354 reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2356 case EM_RXBUFFER_8192:
2357 reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2359 case EM_RXBUFFER_16384:
2360 reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2364 if (ifp->if_mtu > ETHERMTU)
2365 reg_rctl |= E1000_RCTL_LPE;
2367 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
2368 if ((adapter->hw.mac_type >= em_82543) &&
2369 (ifp->if_capenable & IFCAP_RXCSUM)) {
2370 reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
2371 reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2372 E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
2375 /* Enable Receives */
2376 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2379 /*********************************************************************
2381  *  Free receive related data structures.
2383  **********************************************************************/
/* Tears down everything em_allocate_receive_structures() built: DMA
 * maps, mbufs, the buffer array, and finally the DMA tag.  Safe to call
 * with partially-initialized state — every resource is NULL-checked.
 * NOTE(review): return type, braces and some closing braces are
 * missing from this extract. */
2385 em_free_receive_structures(struct adapter *adapter)
2387 	struct em_buffer   *rx_buffer;
2390 	INIT_DEBUGOUT("free_receive_structures: begin");
2392 	if (adapter->rx_buffer_area != NULL) {
2393 		rx_buffer = adapter->rx_buffer_area;
2394 		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
/* Unload before destroy: the map may still be loaded with an mbuf. */
2395 			if (rx_buffer->map != NULL) {
2396 				bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2397 				bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2399 			if (rx_buffer->m_head != NULL)
2400 				m_freem(rx_buffer->m_head);
2401 			rx_buffer->m_head = NULL;
2404 	if (adapter->rx_buffer_area != NULL) {
2405 		free(adapter->rx_buffer_area, M_DEVBUF);
2406 		adapter->rx_buffer_area = NULL;
/* Destroy the tag last — all maps created from it are gone by now. */
2408 	if (adapter->rxtag != NULL) {
2409 		bus_dma_tag_destroy(adapter->rxtag);
2410 		adapter->rxtag = NULL;
2414 /*********************************************************************
2416  *  This routine executes in interrupt context. It replenishes
2417  *  the mbufs in the descriptor and sends data which has been
2418  *  dma'ed into host memory to upper layer.
2420  *  We loop at most count times if count is > 0, or until done if
2423  *********************************************************************/
/* Main RX path: walks descriptors the hardware has marked done
 * (E1000_RXD_STAT_DD), reassembles multi-descriptor frames through
 * adapter->fmp/lmp, strips CRC, and hands completed packets to the
 * stack via ifp->if_input.  NOTE(review): many lines (return type,
 * braces, else branches, accept_frame logic) are missing from this
 * extract; the surviving lines below are byte-identical to the
 * original. */
2425 em_process_receive_interrupts(struct adapter *adapter, int count)
2429 	uint8_t		accept_frame = 0;
2431 	uint16_t	len, desc_len, prev_len_adj;
2434 	/* Pointer to the receive descriptor being examined. */
2435 	struct em_rx_desc   *current_desc;
2437 	ifp = &adapter->interface_data.ac_if;
2438 	i = adapter->next_rx_desc_to_check;
2439 	current_desc = &adapter->rx_desc_base[i];
/* Fast exit when the next descriptor is not yet done. */
2441 	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
2443 		adapter->no_pkts_avail++;
/* count < 0 means "until done": count-- never reaches 0 then. */
2447 	while ((current_desc->status & E1000_RXD_STAT_DD) && (count != 0)) {
2448 		mp = adapter->rx_buffer_area[i].m_head;
/* Sync DMA so the CPU sees the freshly DMA'd packet data. */
2449 		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2450 				BUS_DMASYNC_POSTREAD);
2454 		desc_len = le16toh(current_desc->length);
2455 		if (current_desc->status & E1000_RXD_STAT_EOP) {
/* Last descriptor of the frame: subtract the Ethernet CRC; if the CRC
 * straddles descriptors, remember how much to trim from the previous
 * mbuf (prev_len_adj). */
2458 			if (desc_len < ETHER_CRC_LEN) {
2460 				prev_len_adj = ETHER_CRC_LEN - desc_len;
2463 				len = desc_len - ETHER_CRC_LEN;
/* TBI (fiber) workaround: a frame flagged with errors may still be
 * acceptable if it only carries a carrier-extension symbol. */
2470 		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2472 			uint32_t pkt_len = desc_len;
2474 			if (adapter->fmp != NULL)
2475 				pkt_len += adapter->fmp->m_pkthdr.len;
2477 			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
2479 			if (TBI_ACCEPT(&adapter->hw, current_desc->status,
2480 				       current_desc->errors,
2481 				       pkt_len, last_byte)) {
2482 				em_tbi_adjust_stats(&adapter->hw,
2485 						    adapter->hw.mac_addr);
/* Replenish this slot with a fresh mbuf; on ENOBUFS keep recycling the
 * old mbuf (em_get_buf with mp) and drop the in-progress frame. */
2495 			if (em_get_buf(i, adapter, NULL) == ENOBUFS) {
2496 				adapter->dropped_pkts++;
2497 				em_get_buf(i, adapter, mp);
2498 				if (adapter->fmp != NULL)
2499 					m_freem(adapter->fmp);
2500 				adapter->fmp = NULL;
2501 				adapter->lmp = NULL;
2505 			/* Assign correct length to the current fragment */
2508 			if (adapter->fmp == NULL) {
2509 				mp->m_pkthdr.len = len;
2510 				adapter->fmp = mp;	 /* Store the first mbuf */
2513 				/* Chain mbuf's together */
2514 				mp->m_flags &= ~M_PKTHDR;
2516 				 * Adjust length of previous mbuf in chain if we
2517 				 * received less than 4 bytes in the last descriptor.
2519 				if (prev_len_adj > 0) {
2520 					adapter->lmp->m_len -= prev_len_adj;
2521 					adapter->fmp->m_pkthdr.len -= prev_len_adj;
2523 				adapter->lmp->m_next = mp;
2524 				adapter->lmp = adapter->lmp->m_next;
2525 				adapter->fmp->m_pkthdr.len += len;
2529 				adapter->fmp->m_pkthdr.rcvif = ifp;
/* Two VLAN/input code paths: DragonFly & old FreeBSD take the 2-arg
 * VLAN_INPUT_TAG; newer FreeBSD takes the 4-arg form that consumes the
 * mbuf on VLAN demux (hence fmp reset inside the macro call). */
2532 #if defined(__DragonFly__) || __FreeBSD_version < 500000
2533 				em_receive_checksum(adapter, current_desc,
2535 				if (current_desc->status & E1000_RXD_STAT_VP)
2536 					VLAN_INPUT_TAG(adapter->fmp,
2537 						       (current_desc->special &
2538 							E1000_RXD_SPC_VLAN_MASK));
2540 					(*ifp->if_input)(ifp, adapter->fmp);
2542 				em_receive_checksum(adapter, current_desc,
2544 				if (current_desc->status & E1000_RXD_STAT_VP)
2545 					VLAN_INPUT_TAG(ifp, adapter->fmp,
2546 						       (current_desc->special &
2547 							E1000_RXD_SPC_VLAN_MASK),
2548 						       adapter->fmp = NULL);
2550 				if (adapter->fmp != NULL)
2551 					(*ifp->if_input)(ifp, adapter->fmp);
2553 				adapter->fmp = NULL;
2554 				adapter->lmp = NULL;
/* Reject path: drop the frame, recycle the mbuf into this slot. */
2557 			adapter->dropped_pkts++;
2558 			em_get_buf(i, adapter, mp);
2559 			if (adapter->fmp != NULL)
2560 				m_freem(adapter->fmp);
2561 			adapter->fmp = NULL;
2562 			adapter->lmp = NULL;
2565 		/* Zero out the receive descriptors status */
2566 		current_desc->status = 0;
2568 		/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
2569 		E1000_WRITE_REG(&adapter->hw, RDT, i);
2571 		/* Advance our pointers to the next descriptor */
2572 		if (++i == adapter->num_rx_desc) {
2574 			current_desc = adapter->rx_desc_base;
2578 	adapter->next_rx_desc_to_check = i;
2581 /*********************************************************************
2583  *  Verify that the hardware indicated that the checksum is valid.
2584  *  Inform the stack about the status of checksum so that stack
2585  *  doesn't spend time verifying the checksum.
2587  *********************************************************************/
/* Translates the RX descriptor's checksum status bits into mbuf
 * csum_flags for the network stack.  NOTE(review): return type, mbuf
 * parameter declaration line, braces and some else branches are
 * missing from this extract. */
2589 em_receive_checksum(struct adapter *adapter,
2590 		    struct em_rx_desc *rx_desc,
2593 	/* 82543 or newer only */
2594 	if ((adapter->hw.mac_type < em_82543) ||
2595 	    /* Ignore Checksum bit is set */
2596 	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
/* Hardware didn't checksum this packet — tell the stack nothing. */
2597 		mp->m_pkthdr.csum_flags = 0;
2601 	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
2603 		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
2604 			/* IP Checksum Good */
2605 			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2606 			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2608 			mp->m_pkthdr.csum_flags = 0;
2612 	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
2614 		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
/* TCP/UDP checksum good: pre-fill csum_data so the stack skips the
 * pseudo-header verification. */
2615 			mp->m_pkthdr.csum_flags |=
2616 			(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2617 			mp->m_pkthdr.csum_data = htons(0xffff);
/* Enables hardware VLAN tag stripping: programs the VLAN ethertype
 * register and sets the VLAN Mode Enable bit in CTRL.  NOTE(review):
 * return type, braces and the ctrl declaration are missing from this
 * extract. */
2624 em_enable_vlans(struct adapter *adapter)
2628 	E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
2630 	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2631 	ctrl |= E1000_CTRL_VME;
2632 	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
/* Unmasks the driver's standard interrupt causes by writing the
 * enable mask to the Interrupt Mask Set register. */
2636 em_enable_intr(struct adapter *adapter)
2638 	E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
/* Masks all interrupt causes except RXSEQ via the Interrupt Mask
 * Clear register.  NOTE(review): keeping RXSEQ unmasked is presumably
 * a link-state workaround — confirm against the full source. */
2642 em_disable_intr(struct adapter *adapter)
2644 	E1000_WRITE_REG(&adapter->hw, IMC,
2645 			(0xffffffff & ~E1000_IMC_RXSEQ));
/* Validates a MAC address: rejects multicast (low bit of first octet
 * set) and the all-zero address.  NOTE(review): the return type and
 * the TRUE/FALSE return statements are missing from this extract. */
2649 em_is_valid_ether_addr(uint8_t *addr)
2651 	char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
2653 	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN)))
/* Writes a 16-bit value to PCI configuration space at `reg` for the
 * device backing this hw instance (hw-layer callback). */
2660 em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
2662 	pci_write_config(((struct em_osdep *)hw->back)->dev, reg, *value, 2);
/* Reads a 16-bit value from PCI configuration space at `reg` into
 * *value (hw-layer callback, counterpart of em_write_pci_cfg). */
2666 em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
2668 	*value = pci_read_config(((struct em_osdep *)hw->back)->dev, reg, 2);
/* Sets the Memory Write Invalidate bit in the PCI command register,
 * based on the command word cached in hw->pci_cmd_word. */
2672 em_pci_set_mwi(struct em_hw *hw)
2674 	pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
2675 			 (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
/* Clears the Memory Write Invalidate bit in the PCI command register
 * (inverse of em_pci_set_mwi). */
2679 em_pci_clear_mwi(struct em_hw *hw)
2681 	pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
2682 			 (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
/* Reads a device register through the I/O-mapped window: write the
 * register offset to IOADDR (offset 0), then read IODATA (offset 4). */
2686 em_read_reg_io(struct em_hw *hw, uint32_t offset)
2688 	bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 0, offset);
2689 	return(bus_space_read_4(hw->reg_io_tag, hw->reg_io_handle, 4));
/* Writes a device register through the I/O-mapped window: write the
 * register offset to IOADDR (offset 0), then the value to IODATA
 * (offset 4). */
2693 em_write_reg_io(struct em_hw *hw, uint32_t offset, uint32_t value)
2695 	bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 0, offset);
2696 	bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 4, value);
2699 /*********************************************************************
2700  * 82544 Coexistence issue workaround.
2701  *    There are 2 issues.
2702  *       1. Transmit Hang issue.
2703  *    To detect this issue, following equation can be used...
2704  *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
2705  *          If SUM[3:0] is in between 1 to 4, we will have this issue.
2708  *    To detect this issue, following equation can be used...
2709  *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
2710  *          If SUM[3:0] is in between 9 to c, we will have this issue.
2714  *             Make sure we do not have ending address as 1,2,3,4(Hang) or
2717  *************************************************************************/
/* Splits a DMA segment into one or two descriptors so that the segment
 * never ends at an address triggering the 82544 hang/DMA-corruption
 * errata.  Returns the number of descriptors filled (1 or 2).
 * NOTE(review): the return type line and the guard condition for the
 * first early-return path (lines 2725-2728) are missing from this
 * extract — in the original that path handles segments too small to
 * split. */
2719 em_fill_descriptors(uint64_t address, uint32_t length, PDESC_ARRAY desc_array)
2721 	/* Since issue is sensitive to length and address.*/
2722 	/* Let us first check the address...*/
2723 	uint32_t safe_terminator;
2725 		desc_array->descriptor[0].address = address;
2726 		desc_array->descriptor[0].length = length;
2727 		desc_array->elements = 1;
2728 		return(desc_array->elements);
/* SUM[3:0] of the errata equation: low 3 address bits + low 4 length
 * bits, truncated to 4 bits. */
2730 	safe_terminator = (uint32_t)((((uint32_t)address & 0x7) + (length & 0xF)) & 0xF);
2731 	/* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
2732 	if (safe_terminator == 0 ||
2733 	    (safe_terminator > 4 && safe_terminator < 9) ||
2734 	    (safe_terminator > 0xC && safe_terminator <= 0xF)) {
2735 		desc_array->descriptor[0].address = address;
2736 		desc_array->descriptor[0].length = length;
2737 		desc_array->elements = 1;
2738 		return(desc_array->elements);
/* Unsafe terminator: split off the final 4 bytes into a second
 * descriptor so the first descriptor ends on a safe boundary. */
2741 	desc_array->descriptor[0].address = address;
2742 	desc_array->descriptor[0].length = length - 4;
2743 	desc_array->descriptor[1].address = address + (length - 4);
2744 	desc_array->descriptor[1].length = 4;
2745 	desc_array->elements = 2;
2746 	return(desc_array->elements);
2749 /**********************************************************************
2751  *  Update the board statistics counters.
2753  **********************************************************************/
/* Accumulates the hardware's clear-on-read statistics registers into
 * adapter->stats, then mirrors the totals into the ifnet OS
 * statistics.  NOTE(review): return type, braces and an "if mac_type
 * >= em_82543" guard around MCC (line 2769) appear to be missing from
 * this extract. */
2755 em_update_stats_counters(struct adapter *adapter)
/* Symbol/sequence error counters are only meaningful on copper media
 * or while link is up. */
2759 	if (adapter->hw.media_type == em_media_type_copper ||
2760 	    (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
2761 		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
2762 		adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
2764 	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
2765 	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
2766 	adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
2767 	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);
2769 	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
2770 	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
2771 	adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
2772 	adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
2773 	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
2774 	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
2775 	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
2776 	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
2777 	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
2778 	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
2779 	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
2780 	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
2781 	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
2782 	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
2783 	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
2784 	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
2785 	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
2786 	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
2787 	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
2788 	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);
2790 	/* For the 64-bit byte counters the low dword must be read first. */
2791 	/* Both registers clear on the read of the high dword */
2793 	adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
2794 	adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
2795 	adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
2796 	adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);
2798 	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
2799 	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
2800 	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
2801 	adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
2802 	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);
2804 	adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
2805 	adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
2806 	adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
2807 	adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);
2809 	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
2810 	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
2811 	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
2812 	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
2813 	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
2814 	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
2815 	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
2816 	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
2817 	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
2818 	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);
/* These error counters only exist on 82543 and newer parts. */
2820 	if (adapter->hw.mac_type >= em_82543) {
2821 		adapter->stats.algnerrc +=
2822 		E1000_READ_REG(&adapter->hw, ALGNERRC);
2823 		adapter->stats.rxerrc +=
2824 		E1000_READ_REG(&adapter->hw, RXERRC);
2825 		adapter->stats.tncrs +=
2826 		E1000_READ_REG(&adapter->hw, TNCRS);
2827 		adapter->stats.cexterr +=
2828 		E1000_READ_REG(&adapter->hw, CEXTERR);
2829 		adapter->stats.tsctc +=
2830 		E1000_READ_REG(&adapter->hw, TSCTC);
2831 		adapter->stats.tsctfc +=
2832 		E1000_READ_REG(&adapter->hw, TSCTFC);
2834 	ifp = &adapter->interface_data.ac_if;
2836 	/* Fill out the OS statistics structure */
2837 	ifp->if_ibytes = adapter->stats.gorcl;
2838 	ifp->if_obytes = adapter->stats.gotcl;
2839 	ifp->if_imcasts = adapter->stats.mprc;
2840 	ifp->if_collisions = adapter->stats.colc;
2843 	ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
2844 	    adapter->stats.crcerrs + adapter->stats.algnerrc +
2845 	    adapter->stats.rlec + adapter->stats.rnbc +
2846 	    adapter->stats.mpc + adapter->stats.cexterr;
2849 	ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol;
2853 /**********************************************************************
2855  * This routine is called only when em_display_debug_stats is enabled.
2856  * This routine provides a way to take a look at important statistics
2857  * maintained by the driver and hardware.
2859  **********************************************************************/
/* Dumps driver-internal debug counters and a few live hardware
 * registers to the console via device_printf.  NOTE(review): return
 * type and braces are missing from this extract. */
2861 em_print_debug_info(struct adapter *adapter)
2863 	device_t dev= adapter->dev;
2864 	uint8_t *hw_addr = adapter->hw.hw_addr;
2866 	device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
2867 	device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
2868 		      E1000_READ_REG(&adapter->hw, TIDV),
2869 		      E1000_READ_REG(&adapter->hw, TADV));
2870 	device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
2871 		      E1000_READ_REG(&adapter->hw, RDTR),
2872 		      E1000_READ_REG(&adapter->hw, RADV));
2874 	device_printf(dev, "Packets not Avail = %ld\n", adapter->no_pkts_avail);
2875 	device_printf(dev, "CleanTxInterrupts = %ld\n",
2876 		      adapter->clean_tx_interrupts);
2878 	device_printf(dev, "fifo workaround = %lld, fifo_reset = %lld\n",
2879 		      (long long)adapter->tx_fifo_wrk,
2880 		      (long long)adapter->tx_fifo_reset);
2881 	device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
2882 		      E1000_READ_REG(&adapter->hw, TDH),
2883 		      E1000_READ_REG(&adapter->hw, TDT));
2884 	device_printf(dev, "Num Tx descriptors avail = %d\n",
2885 		      adapter->num_tx_desc_avail);
2886 	device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
2887 		      adapter->no_tx_desc_avail1);
2888 	device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
2889 		      adapter->no_tx_desc_avail2);
2890 	device_printf(dev, "Std mbuf failed = %ld\n",
2891 		      adapter->mbuf_alloc_failed);
2892 	device_printf(dev, "Std mbuf cluster failed = %ld\n",
2893 		      adapter->mbuf_cluster_failed);
2894 	device_printf(dev, "Driver dropped packets = %ld\n",
2895 		      adapter->dropped_pkts);
/* Dumps the accumulated hardware statistics (adapter->stats, kept
 * current by em_update_stats_counters) to the console.  NOTE(review):
 * return type and braces are missing from this extract. */
2899 em_print_hw_stats(struct adapter *adapter)
2901 	device_t dev= adapter->dev;
2903 	device_printf(dev, "Excessive collisions = %lld\n",
2904 		      (long long)adapter->stats.ecol);
2905 	device_printf(dev, "Symbol errors = %lld\n",
2906 		      (long long)adapter->stats.symerrs);
2907 	device_printf(dev, "Sequence errors = %lld\n",
2908 		      (long long)adapter->stats.sec);
2909 	device_printf(dev, "Defer count = %lld\n",
2910 		      (long long)adapter->stats.dc);
2912 	device_printf(dev, "Missed Packets = %lld\n",
2913 		      (long long)adapter->stats.mpc);
2914 	device_printf(dev, "Receive No Buffers = %lld\n",
2915 		      (long long)adapter->stats.rnbc);
2916 	device_printf(dev, "Receive length errors = %lld\n",
2917 		      (long long)adapter->stats.rlec);
2918 	device_printf(dev, "Receive errors = %lld\n",
2919 		      (long long)adapter->stats.rxerrc);
2920 	device_printf(dev, "Crc errors = %lld\n",
2921 		      (long long)adapter->stats.crcerrs);
2922 	device_printf(dev, "Alignment errors = %lld\n",
2923 		      (long long)adapter->stats.algnerrc);
2924 	device_printf(dev, "Carrier extension errors = %lld\n",
2925 		      (long long)adapter->stats.cexterr);
2927 	device_printf(dev, "XON Rcvd = %lld\n",
2928 		      (long long)adapter->stats.xonrxc);
2929 	device_printf(dev, "XON Xmtd = %lld\n",
2930 		      (long long)adapter->stats.xontxc);
2931 	device_printf(dev, "XOFF Rcvd = %lld\n",
2932 		      (long long)adapter->stats.xoffrxc);
2933 	device_printf(dev, "XOFF Xmtd = %lld\n",
2934 		      (long long)adapter->stats.xofftxc);
2936 	device_printf(dev, "Good Packets Rcvd = %lld\n",
2937 		      (long long)adapter->stats.gprc);
2938 	device_printf(dev, "Good Packets Xmtd = %lld\n",
2939 		      (long long)adapter->stats.gptc);
/* Sysctl handler: when a new value is written to the node, print the
 * driver debug info.  NOTE(review): the result==1 check, return
 * statements and braces are missing from this extract. */
2943 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
2947 	struct adapter *adapter;
2950 	error = sysctl_handle_int(oidp, &result, 0, req);
/* Read-only access (no newptr) or an error means nothing to do. */
2952 	if (error || !req->newptr)
2956 		adapter = (struct adapter *)arg1;
2957 		em_print_debug_info(adapter);
/* Sysctl handler: when a new value is written to the node, print the
 * accumulated hardware statistics.  NOTE(review): the result check,
 * return statements and braces are missing from this extract. */
2964 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
2968 	struct adapter *adapter;
2971 	error = sysctl_handle_int(oidp, &result, 0, req);
/* Read-only access (no newptr) or an error means nothing to do. */
2973 	if (error || !req->newptr)
2977 		adapter = (struct adapter *)arg1;
2978 		em_print_hw_stats(adapter);
/* Sysctl handler for the interrupt-delay tunables: validates the
 * microsecond value, converts it to hardware ticks, and rewrites the
 * low 16 bits of the associated delay register.  NOTE(review): return
 * type, local declarations (usecs/ticks/regval/error), braces, several
 * return statements and switch-case break lines are missing from this
 * extract. */
2985 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
2987 	struct em_int_delay_info *info;
2988 	struct adapter *adapter;
2995 	info = (struct em_int_delay_info *)arg1;
2996 	adapter = info->adapter;
2997 	usecs = info->value;
2998 	error = sysctl_handle_int(oidp, &usecs, 0, req);
2999 	if (error != 0 || req->newptr == NULL)
/* Reject values outside what the 16-bit tick field can represent. */
3001 	if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
3003 	info->value = usecs;
3004 	ticks = E1000_USECS_TO_TICKS(usecs);
/* Read-modify-write: only the low 16 bits of the register carry the
 * delay ticks. */
3007 	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
3008 	regval = (regval & ~0xffff) | (ticks & 0xffff);
3009 	/* Handle a few special cases. */
3010 	switch (info->offset) {
3012 	case E1000_82542_RDTR:
3013 		regval |= E1000_RDT_FPDB;
3016 	case E1000_82542_TIDV:
/* A zero TIDV disables the delayed-interrupt feature in the TX
 * descriptor command instead of being written to the register. */
3018 			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
3019 			/* Don't write 0 into the TIDV register. */
3022 			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3025 	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
/* Registers one read-write integer sysctl node backed by an
 * em_int_delay_info record; writes are routed through
 * em_sysctl_int_delay.  NOTE(review): the return type line is missing
 * from this extract. */
3031 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
3032 			const char *description, struct em_int_delay_info *info,
3033 			int offset, int value)
/* Seed the info record so the handler can find its adapter, the
 * register offset, and the current value. */
3035 	info->adapter = adapter;
3036 	info->offset = offset;
3037 	info->value = value;
3038 	SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
3039 			SYSCTL_CHILDREN(adapter->sysctl_tree),
3040 			OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
3041 			info, 0, em_sysctl_int_delay, "I", description);
3045 em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS)
3047 struct adapter *adapter = (void *)arg1;
3051 throttle = em_int_throttle_ceil;
3052 error = sysctl_handle_int(oidp, &throttle, 0, req);
3053 if (error || req->newptr == NULL)
3055 if (throttle < 0 || throttle > 1000000000 / 256)
3059 * Set the interrupt throttling rate in 256ns increments,
3060 * recalculate sysctl value assignment to get exact frequency.
3062 throttle = 1000000000 / 256 / throttle;
3063 em_int_throttle_ceil = 1000000000 / 256 / throttle;
3065 E1000_WRITE_REG(&adapter->hw, ITR, throttle);
3068 em_int_throttle_ceil = 0;
3070 E1000_WRITE_REG(&adapter->hw, ITR, 0);
3073 device_printf(adapter->dev, "Interrupt moderation set to %d/sec\n",
3074 em_int_throttle_ceil);