1 /**************************************************************************
3 Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>. All rights reserved.
5 Copyright (c) 2001-2003, Intel Corporation
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
34 ***************************************************************************/
36 /*$FreeBSD: src/sys/dev/em/if_em.c,v 1.2.2.15 2003/06/09 22:10:15 pdeuskar Exp $*/
37 /*$DragonFly: src/sys/dev/netif/em/if_em.c,v 1.26 2005/02/05 23:23:25 joerg Exp $*/
41 /*********************************************************************
42 * Set this to one to display debug statistics
43 *********************************************************************/
/* Global debug toggle; presumably consulted by the stats/debug sysctl
 * handlers (em_sysctl_stats/em_print_hw_stats) — confirm in their bodies. */
44 int em_display_debug_stats = 0;
46 /*********************************************************************
 *  Driver version
48 *********************************************************************/
/* Reported via "%s, Version - %s" in em_probe() — the call's final
 * argument line is missing from this extraction, but this string is
 * presumably it; verify against the stock driver. */
50 char em_driver_version[] = "1.7.25";
53 /*********************************************************************
56 * Used by probe to select devices to load on
57 * Last field stores an index into em_strings
58 * Last entry must be all 0s
60 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61 *********************************************************************/
63 static em_vendor_info_t em_vendor_info_array[] =
65 /* Intel(R) PRO/1000 Network Connection */
66 { 0x8086, 0x1000, PCI_ANY_ID, PCI_ANY_ID, 0},
67 { 0x8086, 0x1001, PCI_ANY_ID, PCI_ANY_ID, 0},
68 { 0x8086, 0x1004, PCI_ANY_ID, PCI_ANY_ID, 0},
69 { 0x8086, 0x1008, PCI_ANY_ID, PCI_ANY_ID, 0},
70 { 0x8086, 0x1009, PCI_ANY_ID, PCI_ANY_ID, 0},
71 { 0x8086, 0x100C, PCI_ANY_ID, PCI_ANY_ID, 0},
72 { 0x8086, 0x100D, PCI_ANY_ID, PCI_ANY_ID, 0},
73 { 0x8086, 0x100E, PCI_ANY_ID, PCI_ANY_ID, 0},
74 { 0x8086, 0x100F, PCI_ANY_ID, PCI_ANY_ID, 0},
75 { 0x8086, 0x1010, PCI_ANY_ID, PCI_ANY_ID, 0},
76 { 0x8086, 0x1011, PCI_ANY_ID, PCI_ANY_ID, 0},
77 { 0x8086, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0},
78 { 0x8086, 0x1013, PCI_ANY_ID, PCI_ANY_ID, 0},
79 { 0x8086, 0x1014, PCI_ANY_ID, PCI_ANY_ID, 0},
80 { 0x8086, 0x1015, PCI_ANY_ID, PCI_ANY_ID, 0},
81 { 0x8086, 0x1016, PCI_ANY_ID, PCI_ANY_ID, 0},
82 { 0x8086, 0x1017, PCI_ANY_ID, PCI_ANY_ID, 0},
83 { 0x8086, 0x1018, PCI_ANY_ID, PCI_ANY_ID, 0},
84 { 0x8086, 0x1019, PCI_ANY_ID, PCI_ANY_ID, 0},
85 { 0x8086, 0x101A, PCI_ANY_ID, PCI_ANY_ID, 0},
86 { 0x8086, 0x101D, PCI_ANY_ID, PCI_ANY_ID, 0},
87 { 0x8086, 0x101E, PCI_ANY_ID, PCI_ANY_ID, 0},
88 { 0x8086, 0x1026, PCI_ANY_ID, PCI_ANY_ID, 0},
89 { 0x8086, 0x1027, PCI_ANY_ID, PCI_ANY_ID, 0},
90 { 0x8086, 0x1028, PCI_ANY_ID, PCI_ANY_ID, 0},
91 { 0x8086, 0x1075, PCI_ANY_ID, PCI_ANY_ID, 0},
92 { 0x8086, 0x1076, PCI_ANY_ID, PCI_ANY_ID, 0},
93 { 0x8086, 0x1077, PCI_ANY_ID, PCI_ANY_ID, 0},
94 { 0x8086, 0x1078, PCI_ANY_ID, PCI_ANY_ID, 0},
95 { 0x8086, 0x1079, PCI_ANY_ID, PCI_ANY_ID, 0},
96 { 0x8086, 0x107A, PCI_ANY_ID, PCI_ANY_ID, 0},
97 { 0x8086, 0x107B, PCI_ANY_ID, PCI_ANY_ID, 0},
98 /* required last entry */
/*********************************************************************
 *  Table of branding strings for all supported NICs.
 *********************************************************************/

/* Indexed by the last (String Index) field of em_vendor_info_array;
 * all current entries use index 0.  Missing closing "};" restored and
 * stray embedded line numbers removed. */
static const char *em_strings[] = {
	"Intel(R) PRO/1000 Network Connection"
};
110 /*********************************************************************
111 * Function prototypes
112 *********************************************************************/
113 static int em_probe(device_t);
114 static int em_attach(device_t);
115 static int em_detach(device_t);
116 static int em_shutdown(device_t);
117 static void em_intr(void *);
118 static void em_start(struct ifnet *);
119 static int em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
120 static void em_watchdog(struct ifnet *);
121 static void em_init(void *);
122 static void em_stop(void *);
123 static void em_media_status(struct ifnet *, struct ifmediareq *);
124 static int em_media_change(struct ifnet *);
125 static void em_identify_hardware(struct adapter *);
126 static void em_local_timer(void *);
127 static int em_hardware_init(struct adapter *);
128 static void em_setup_interface(device_t, struct adapter *);
129 static int em_setup_transmit_structures(struct adapter *);
130 static void em_initialize_transmit_unit(struct adapter *);
131 static int em_setup_receive_structures(struct adapter *);
132 static void em_initialize_receive_unit(struct adapter *);
133 static void em_enable_intr(struct adapter *);
134 static void em_disable_intr(struct adapter *);
135 static void em_free_transmit_structures(struct adapter *);
136 static void em_free_receive_structures(struct adapter *);
137 static void em_update_stats_counters(struct adapter *);
138 static void em_clean_transmit_interrupts(struct adapter *);
139 static int em_allocate_receive_structures(struct adapter *);
140 static int em_allocate_transmit_structures(struct adapter *);
141 static void em_process_receive_interrupts(struct adapter *, int);
142 static void em_receive_checksum(struct adapter *, struct em_rx_desc *,
144 static void em_transmit_checksum_setup(struct adapter *, struct mbuf *,
145 uint32_t *, uint32_t *);
146 static void em_set_promisc(struct adapter *);
147 static void em_disable_promisc(struct adapter *);
148 static void em_set_multi(struct adapter *);
149 static void em_print_hw_stats(struct adapter *);
150 static void em_print_link_status(struct adapter *);
151 static int em_get_buf(int i, struct adapter *, struct mbuf *, int how);
152 static void em_enable_vlans(struct adapter *);
153 static int em_encap(struct adapter *, struct mbuf *);
154 static void em_smartspeed(struct adapter *);
155 static int em_82547_fifo_workaround(struct adapter *, int);
156 static void em_82547_update_fifo_head(struct adapter *, int);
157 static int em_82547_tx_fifo_reset(struct adapter *);
158 static void em_82547_move_tail(void *arg);
159 static int em_dma_malloc(struct adapter *, bus_size_t,
160 struct em_dma_alloc *, int);
161 static void em_dma_free(struct adapter *, struct em_dma_alloc *);
162 static void em_print_debug_info(struct adapter *);
163 static int em_is_valid_ether_addr(uint8_t *);
164 static int em_sysctl_stats(SYSCTL_HANDLER_ARGS);
165 static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
166 static uint32_t em_fill_descriptors(uint64_t address, uint32_t length,
167 PDESC_ARRAY desc_array);
168 static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
169 static int em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
170 static void em_add_int_delay_sysctl(struct adapter *, const char *,
172 struct em_int_delay_info *, int, int);
174 /*********************************************************************
175 * FreeBSD Device Interface Entry Points
176 *********************************************************************/
178 static device_method_t em_methods[] = {
179 /* Device interface */
180 DEVMETHOD(device_probe, em_probe),
181 DEVMETHOD(device_attach, em_attach),
182 DEVMETHOD(device_detach, em_detach),
183 DEVMETHOD(device_shutdown, em_shutdown),
187 static driver_t em_driver = {
188 "em", em_methods, sizeof(struct adapter),
191 static devclass_t em_devclass;
193 DECLARE_DUMMY_MODULE(if_em);
194 DRIVER_MODULE(if_em, pci, em_driver, em_devclass, 0, 0);
196 /*********************************************************************
197 * Tunable default values.
198 *********************************************************************/
200 #define E1000_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
201 #define E1000_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
203 static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
204 static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
205 static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
206 static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
207 static int em_int_throttle_ceil = 10000;
209 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
210 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
211 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
212 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
213 TUNABLE_INT("hw.em.int_throttle_ceil", &em_int_throttle_ceil);
215 /*********************************************************************
216 * Device identification routine
218 * em_probe determines if the driver should be loaded on
219 * adapter based on PCI vendor/device id of the adapter.
221 * return 0 on success, positive on failure
222 *********************************************************************/
225 em_probe(device_t dev)
227 em_vendor_info_t *ent;
229 uint16_t pci_vendor_id = 0;
230 uint16_t pci_device_id = 0;
231 uint16_t pci_subvendor_id = 0;
232 uint16_t pci_subdevice_id = 0;
233 char adapter_name[60];
235 INIT_DEBUGOUT("em_probe: begin");
237 pci_vendor_id = pci_get_vendor(dev);
238 if (pci_vendor_id != EM_VENDOR_ID)
241 pci_device_id = pci_get_device(dev);
242 pci_subvendor_id = pci_get_subvendor(dev);
243 pci_subdevice_id = pci_get_subdevice(dev);
245 ent = em_vendor_info_array;
246 while (ent->vendor_id != 0) {
247 if ((pci_vendor_id == ent->vendor_id) &&
248 (pci_device_id == ent->device_id) &&
250 ((pci_subvendor_id == ent->subvendor_id) ||
251 (ent->subvendor_id == PCI_ANY_ID)) &&
253 ((pci_subdevice_id == ent->subdevice_id) ||
254 (ent->subdevice_id == PCI_ANY_ID))) {
255 snprintf(adapter_name, sizeof(adapter_name),
256 "%s, Version - %s", em_strings[ent->index],
258 device_set_desc_copy(dev, adapter_name);
267 /*********************************************************************
268 * Device initialization routine
270 * The attach entry point is called when the driver is being loaded.
271 * This routine identifies the type of hardware, allocates all resources
272 * and initializes the hardware.
274 * return 0 on success, positive on failure
275 *********************************************************************/
/*
 * NOTE(review): this block carries stray line numbers at the start of
 * every line and is missing many lines (return type, opening brace,
 * several locals such as tsize/rsize/rid/i/val/error, error-path
 * labels/returns, closing braces).  Presumably lost in extraction —
 * left byte-identical here; restore from the stock if_em.c before
 * building.
 */
278 em_attach(device_t dev)
280 struct adapter *adapter;
285 INIT_DEBUGOUT("em_attach: begin");
/* Zero the softc so every field starts from a known state. */
287 adapter = device_get_softc(dev);
289 bzero(adapter, sizeof(struct adapter));
291 callout_init(&adapter->timer);
292 callout_init(&adapter->tx_fifo_timer);
295 adapter->osdep.dev = dev;
/* Per-device sysctl subtree under hw.<nameunit> for debug/stat knobs. */
298 sysctl_ctx_init(&adapter->sysctl_ctx);
299 adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
300 SYSCTL_STATIC_CHILDREN(_hw),
302 device_get_nameunit(dev),
306 if (adapter->sysctl_tree == NULL) {
311 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
312 SYSCTL_CHILDREN(adapter->sysctl_tree),
313 OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
315 em_sysctl_debug_info, "I", "Debug Information");
317 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
318 SYSCTL_CHILDREN(adapter->sysctl_tree),
319 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
321 em_sysctl_stats, "I", "Statistics");
323 /* Determine hardware revision */
324 em_identify_hardware(adapter);
326 /* Set up some sysctls for the tunable interrupt delays */
327 em_add_int_delay_sysctl(adapter, "rx_int_delay",
328 "receive interrupt delay in usecs",
329 &adapter->rx_int_delay,
330 E1000_REG_OFFSET(&adapter->hw, RDTR),
331 em_rx_int_delay_dflt);
332 em_add_int_delay_sysctl(adapter, "tx_int_delay",
333 "transmit interrupt delay in usecs",
334 &adapter->tx_int_delay,
335 E1000_REG_OFFSET(&adapter->hw, TIDV),
336 em_tx_int_delay_dflt);
/* Absolute-delay limits and throttle ceiling only exist on 82540+. */
337 if (adapter->hw.mac_type >= em_82540) {
338 em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
339 "receive interrupt delay limit in usecs",
340 &adapter->rx_abs_int_delay,
341 E1000_REG_OFFSET(&adapter->hw, RADV),
342 em_rx_abs_int_delay_dflt);
343 em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
344 "transmit interrupt delay limit in usecs",
345 &adapter->tx_abs_int_delay,
346 E1000_REG_OFFSET(&adapter->hw, TADV),
347 em_tx_abs_int_delay_dflt);
348 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
349 SYSCTL_CHILDREN(adapter->sysctl_tree),
350 OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW,
351 adapter, 0, em_sysctl_int_throttle, "I", NULL);
354 /* Parameters (to be read from user) */
355 adapter->num_tx_desc = EM_MAX_TXD;
356 adapter->num_rx_desc = EM_MAX_RXD;
357 adapter->hw.autoneg = DO_AUTO_NEG;
358 adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
359 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
360 adapter->hw.tbi_compatibility_en = TRUE;
361 adapter->rx_buffer_len = EM_RXBUFFER_2048;
364 * These parameters control the automatic generation(Tx) and
365 * response(Rx) to Ethernet PAUSE frames.
367 adapter->hw.fc_high_water = FC_DEFAULT_HI_THRESH;
368 adapter->hw.fc_low_water = FC_DEFAULT_LO_THRESH;
369 adapter->hw.fc_pause_time = FC_DEFAULT_TX_TIMER;
370 adapter->hw.fc_send_xon = TRUE;
371 adapter->hw.fc = em_fc_full;
373 adapter->hw.phy_init_script = 1;
374 adapter->hw.phy_reset_disable = FALSE;
376 #ifndef EM_MASTER_SLAVE
377 adapter->hw.master_slave = em_ms_hw_default;
379 adapter->hw.master_slave = EM_MASTER_SLAVE;
383 * Set the max frame size assuming standard ethernet
386 adapter->hw.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
388 adapter->hw.min_frame_size =
389 MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
392 * This controls when hardware reports transmit completion
395 adapter->hw.report_tx_early = 1;
/* Map the memory BAR; register access goes through the bus handle. */
398 adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
400 if (!(adapter->res_memory)) {
401 device_printf(dev, "Unable to allocate bus resource: memory\n");
405 adapter->osdep.mem_bus_space_tag =
406 rman_get_bustag(adapter->res_memory);
407 adapter->osdep.mem_bus_space_handle =
408 rman_get_bushandle(adapter->res_memory);
409 adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
/* 82544 and newer also need an I/O BAR; scan config space for it. */
411 if (adapter->hw.mac_type > em_82543) {
412 /* Figure our where our IO BAR is ? */
414 for (i = 0; i < 5; i++) {
415 val = pci_read_config(dev, rid, 4);
416 if (val & 0x00000001) {
417 adapter->io_rid = rid;
423 adapter->res_ioport = bus_alloc_resource_any(dev,
424 SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
425 if (!(adapter->res_ioport)) {
426 device_printf(dev, "Unable to allocate bus resource: ioport\n");
431 adapter->hw.reg_io_tag = rman_get_bustag(adapter->res_ioport);
432 adapter->hw.reg_io_handle = rman_get_bushandle(adapter->res_ioport);
436 adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ,
437 &rid, RF_SHAREABLE | RF_ACTIVE);
438 if (!(adapter->res_interrupt)) {
439 device_printf(dev, "Unable to allocate bus resource: interrupt\n");
444 adapter->hw.back = &adapter->osdep;
446 /* Initialize eeprom parameters */
447 em_init_eeprom_params(&adapter->hw);
449 tsize = adapter->num_tx_desc * sizeof(struct em_tx_desc);
451 /* Allocate Transmit Descriptor ring */
452 if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_WAITOK)) {
453 device_printf(dev, "Unable to allocate TxDescriptor memory\n");
457 adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;
459 rsize = adapter->num_rx_desc * sizeof(struct em_rx_desc);
461 /* Allocate Receive Descriptor ring */
462 if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_WAITOK)) {
463 device_printf(dev, "Unable to allocate rx_desc memory\n");
467 adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;
469 /* Initialize the hardware */
470 if (em_hardware_init(adapter)) {
471 device_printf(dev, "Unable to initialize the hardware\n");
476 /* Copy the permanent MAC address out of the EEPROM */
477 if (em_read_mac_addr(&adapter->hw) < 0) {
478 device_printf(dev, "EEPROM read error while reading mac address\n");
483 if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
484 device_printf(dev, "Invalid mac address\n");
489 /* Setup OS specific network interface */
490 em_setup_interface(dev, adapter);
492 /* Initialize statistics */
493 em_clear_hw_cntrs(&adapter->hw);
494 em_update_stats_counters(adapter);
495 adapter->hw.get_link_status = 1;
496 em_check_for_link(&adapter->hw);
498 /* Print the link status */
499 if (adapter->link_active == 1) {
500 em_get_speed_and_duplex(&adapter->hw, &adapter->link_speed,
501 &adapter->link_duplex);
502 device_printf(dev, "Speed: %d Mbps, Duplex: %s\n",
504 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
506 device_printf(dev, "Speed: N/A, Duplex:N/A\n");
508 /* Identify 82544 on PCIX */
509 em_get_bus_info(&adapter->hw);
510 if (adapter->hw.bus_type == em_bus_type_pcix &&
511 adapter->hw.mac_type == em_82544)
512 adapter->pcix_82544 = TRUE;
514 adapter->pcix_82544 = FALSE;
/* Interrupt handler is registered last so it never sees a
 * half-initialized softc. */
516 error = bus_setup_intr(dev, adapter->res_interrupt, INTR_TYPE_NET,
517 (void (*)(void *)) em_intr, adapter,
518 &adapter->int_handler_tag);
520 device_printf(dev, "Error registering interrupt handler!\n");
521 ether_ifdetach(&adapter->interface_data.ac_if);
525 INIT_DEBUGOUT("em_attach: end");
533 /*********************************************************************
534 * Device removal routine
536 * The detach entry point is called when the driver is being removed.
537 * This routine stops the adapter and deallocates all the resources
538 * that were allocated for driver operation.
540 * return 0 on success, positive on failure
541 *********************************************************************/
/*
 * NOTE(review): stray embedded line numbers and missing lines (return
 * type, braces, em_stop call, final return) — extraction damage; left
 * byte-identical.  Teardown mirrors em_attach in reverse order.
 */
544 em_detach(device_t dev)
546 struct adapter * adapter = device_get_softc(dev);
549 INIT_DEBUGOUT("em_detach: begin");
/* Flag checked by em_ioctl() so in-flight ioctls bail out early. */
552 adapter->in_detach = 1;
554 if (device_is_attached(dev)) {
556 em_phy_hw_reset(&adapter->hw);
557 ether_ifdetach(&adapter->interface_data.ac_if);
559 bus_generic_detach(dev);
561 if (adapter->res_interrupt != NULL) {
562 bus_teardown_intr(dev, adapter->res_interrupt,
563 adapter->int_handler_tag);
564 bus_release_resource(dev, SYS_RES_IRQ, 0,
565 adapter->res_interrupt);
567 if (adapter->res_memory != NULL) {
568 bus_release_resource(dev, SYS_RES_MEMORY, EM_MMBA,
569 adapter->res_memory);
572 if (adapter->res_ioport != NULL) {
573 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
574 adapter->res_ioport);
577 /* Free Transmit Descriptor ring */
578 if (adapter->tx_desc_base != NULL) {
579 em_dma_free(adapter, &adapter->txdma);
580 adapter->tx_desc_base = NULL;
583 /* Free Receive Descriptor ring */
584 if (adapter->rx_desc_base != NULL) {
585 em_dma_free(adapter, &adapter->rxdma);
586 adapter->rx_desc_base = NULL;
/* Tree pointer is cleared first so sysctl handlers can't race the free. */
589 adapter->sysctl_tree = NULL;
590 sysctl_ctx_free(&adapter->sysctl_ctx);
596 /*********************************************************************
598 * Shutdown entry point
600 **********************************************************************/
603 em_shutdown(device_t dev)
605 struct adapter *adapter = device_get_softc(dev);
610 /*********************************************************************
611 * Transmit entry point
613 * em_start is called by the stack to initiate a transmit.
614 * The driver will remain in this routine as long as there are
615 * packets to transmit and transmit resources are available.
616 * In case resources are not available stack is notified and
617 * the packet is requeued.
618 **********************************************************************/
/*
 * NOTE(review): stray embedded line numbers and missing lines (return
 * type, braces, m_head local, NULL check after dequeue, break
 * statements) — extraction damage; left byte-identical.
 */
621 em_start(struct ifnet *ifp)
625 struct adapter *adapter = ifp->if_softc;
/* No link: nothing can be transmitted, leave the queue alone. */
627 if (!adapter->link_active)
631 while (ifp->if_snd.ifq_head != NULL) {
632 IF_DEQUEUE(&ifp->if_snd, m_head);
/* Encap failure == out of descriptors: requeue the packet, mark the
 * interface active-blocked and stop draining. */
637 if (em_encap(adapter, m_head)) {
638 ifp->if_flags |= IFF_OACTIVE;
639 IF_PREPEND(&ifp->if_snd, m_head);
643 /* Send a copy of the frame to the BPF listener */
644 BPF_MTAP(ifp, m_head);
646 /* Set timeout in case hardware has problems transmitting */
647 ifp->if_timer = EM_TX_TIMEOUT;
652 /*********************************************************************
655 * em_ioctl is called when the user wants to configure the
658 * return 0 on success, positive on failure
659 **********************************************************************/
/*
 * NOTE(review): stray embedded line numbers; the switch statement,
 * case labels, break statements and splimp/splx bracketing are missing
 * (extraction damage) — left byte-identical.  The surviving bodies
 * correspond to SIOCxIFADDR, SIOCSIFMTU, SIOCSIFFLAGS,
 * SIOC(ADD|DEL)MULTI, SIOCxIFMEDIA and SIOCSIFCAP.
 */
662 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
664 int s, mask, error = 0;
665 struct ifreq *ifr = (struct ifreq *) data;
666 struct adapter *adapter = ifp->if_softc;
/* Refuse configuration while em_detach() is tearing us down. */
670 if (adapter->in_detach)
676 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
677 ether_ioctl(ifp, command, data);
680 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
/* Reject MTUs whose resulting frame exceeds the jumbo limit. */
681 if (ifr->ifr_mtu > MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
684 ifp->if_mtu = ifr->ifr_mtu;
685 adapter->hw.max_frame_size =
686 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
691 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
692 if (ifp->if_flags & IFF_UP) {
693 if (!(ifp->if_flags & IFF_RUNNING))
/* Re-apply promiscuity so flag changes while running take effect. */
695 em_disable_promisc(adapter);
696 em_set_promisc(adapter);
698 if (ifp->if_flags & IFF_RUNNING)
704 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
705 if (ifp->if_flags & IFF_RUNNING) {
706 em_disable_intr(adapter);
707 em_set_multi(adapter);
/* 82542 rev2.0 needs a receiver re-init after filter updates. */
708 if (adapter->hw.mac_type == em_82542_rev2_0)
709 em_initialize_receive_unit(adapter);
710 #ifdef DEVICE_POLLING
711 if (!(ifp->if_flags & IFF_POLLING))
713 em_enable_intr(adapter);
718 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
719 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
722 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
/* Toggle hardware checksum offload when the requested bits differ. */
723 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
724 if (mask & IFCAP_HWCSUM) {
725 if (IFCAP_HWCSUM & ifp->if_capenable)
726 ifp->if_capenable &= ~IFCAP_HWCSUM;
728 ifp->if_capenable |= IFCAP_HWCSUM;
729 if (ifp->if_flags & IFF_RUNNING)
734 IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)\n", (int)command);
743 /*********************************************************************
744 * Watchdog entry point
746 * This routine is called whenever hardware quits transmitting.
748 **********************************************************************/
/*
 * NOTE(review): stray embedded line numbers; braces, the early return
 * and the recovery (re-init / error counter) tail are missing —
 * extraction damage; left byte-identical.
 */
751 em_watchdog(struct ifnet *ifp)
753 struct adapter * adapter;
754 adapter = ifp->if_softc;
756 /* If we are in this routine because of pause frames, then
757 * don't reset the hardware.
759 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
760 ifp->if_timer = EM_TX_TIMEOUT;
764 if (em_check_for_link(&adapter->hw))
765 if_printf(ifp, "watchdog timeout -- resetting\n");
/* Clearing RUNNING forces the reset path to reinitialize the ring. */
767 ifp->if_flags &= ~IFF_RUNNING;
774 /*********************************************************************
777 * This routine is used in two ways. It is used by the stack as
778 * init entry point in network interface structure. It is also used
779 * by the driver as a hw/sw initialization routine to get to a
782 * return 0 on success, positive on failure
783 **********************************************************************/
/*
 * NOTE(review): the "em_init(void *arg)" signature line, braces, the
 * length argument of bcopy, em_stop/return error paths and spl
 * bracketing are missing — extraction damage; left byte-identical.
 */
789 struct adapter *adapter = arg;
790 struct ifnet *ifp = &adapter->interface_data.ac_if;
792 INIT_DEBUGOUT("em_init: begin");
798 /* Get the latest mac address, User can use a LAA */
799 bcopy(adapter->interface_data.ac_enaddr, adapter->hw.mac_addr,
802 /* Initialize the hardware */
803 if (em_hardware_init(adapter)) {
804 if_printf(ifp, "Unable to initialize the hardware\n");
809 em_enable_vlans(adapter);
811 /* Prepare transmit descriptors and buffers */
812 if (em_setup_transmit_structures(adapter)) {
813 if_printf(ifp, "Could not setup transmit structures\n");
818 em_initialize_transmit_unit(adapter);
820 /* Setup Multicast table */
821 em_set_multi(adapter);
823 /* Prepare receive descriptors and buffers */
824 if (em_setup_receive_structures(adapter)) {
825 if_printf(ifp, "Could not setup receive structures\n");
830 em_initialize_receive_unit(adapter);
832 /* Don't loose promiscuous settings */
833 em_set_promisc(adapter);
835 ifp->if_flags |= IFF_RUNNING;
836 ifp->if_flags &= ~IFF_OACTIVE;
/* Checksum offload assist is only available on 82543 and newer. */
838 if (adapter->hw.mac_type >= em_82543) {
839 if (ifp->if_capenable & IFCAP_TXCSUM)
840 ifp->if_hwassist = EM_CHECKSUM_FEATURES;
842 ifp->if_hwassist = 0;
/* Periodic link/stats timer, rearmed every 2 seconds. */
845 callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
846 em_clear_hw_cntrs(&adapter->hw);
847 #ifdef DEVICE_POLLING
849 * Only enable interrupts if we are not polling, make sure
850 * they are off otherwise.
852 if (ifp->if_flags & IFF_POLLING)
853 em_disable_intr(adapter);
855 #endif /* DEVICE_POLLING */
856 em_enable_intr(adapter);
858 /* Don't reset the phy next time init gets called */
859 adapter->hw.phy_reset_disable = TRUE;
864 #ifdef DEVICE_POLLING
865 static poll_handler_t em_poll;
/*
 * Polling-mode handler (DEVICE_POLLING): services the rings without
 * interrupts.  NOTE(review): stray embedded line numbers; the "static
 * void" line, braces, reg_icr local, returns and the em_start tail call
 * are missing — extraction damage; left byte-identical.
 */
868 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
870 struct adapter *adapter = ifp->if_softc;
873 if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
874 em_enable_intr(adapter);
/* On status-check polls, a RXSEQ/LSC cause means possible link change:
 * refresh link state and restart the 2-second timer. */
877 if (cmd == POLL_AND_CHECK_STATUS) {
878 reg_icr = E1000_READ_REG(&adapter->hw, ICR);
879 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
880 callout_stop(&adapter->timer);
881 adapter->hw.get_link_status = 1;
882 em_check_for_link(&adapter->hw);
883 em_print_link_status(adapter);
884 callout_reset(&adapter->timer, 2*hz, em_local_timer,
888 if (ifp->if_flags & IFF_RUNNING) {
889 em_process_receive_interrupts(adapter, count);
890 em_clean_transmit_interrupts(adapter);
893 if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
896 #endif /* DEVICE_POLLING */
898 /*********************************************************************
900 * Interrupt Service routine
902 **********************************************************************/
/*
 * NOTE(review): the "static void em_intr(void *arg)" signature, the
 * ifp/reg_icr declarations, braces, returns and the em_start tail call
 * are missing — extraction damage; left byte-identical.
 */
908 struct adapter *adapter = arg;
910 ifp = &adapter->interface_data.ac_if;
912 #ifdef DEVICE_POLLING
/* Hand the device over to the polling framework if it is active. */
913 if (ifp->if_flags & IFF_POLLING)
916 if (ether_poll_register(em_poll, ifp)) {
917 em_disable_intr(adapter);
921 #endif /* DEVICE_POLLING */
/* Reading ICR acknowledges/clears the pending interrupt causes. */
923 reg_icr = E1000_READ_REG(&adapter->hw, ICR);
927 /* Link status change */
928 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
929 callout_stop(&adapter->timer);
930 adapter->hw.get_link_status = 1;
931 em_check_for_link(&adapter->hw);
932 em_print_link_status(adapter);
933 callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
937 * note: do not attempt to improve efficiency by looping. This
938 * only results in unnecessary piecemeal collection of received
939 * packets and unnecessary piecemeal cleanups of the transmit ring.
941 if (ifp->if_flags & IFF_RUNNING) {
942 em_process_receive_interrupts(adapter, -1);
943 em_clean_transmit_interrupts(adapter);
/* Kick the transmitter if packets queued while we serviced the rings. */
946 if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
950 /*********************************************************************
952 * Media Ioctl callback
954 * This routine is called whenever the user queries the status of
955 * the interface using ifconfig.
957 **********************************************************************/
/*
 * NOTE(review): stray embedded line numbers; the "static void" line,
 * braces, else branches, the speed case labels (10/100/1000) and the
 * early return are missing — extraction damage; left byte-identical.
 */
959 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
961 struct adapter * adapter = ifp->if_softc;
963 INIT_DEBUGOUT("em_media_status: begin");
/* Refresh link state from the hardware STATUS register. */
965 em_check_for_link(&adapter->hw);
966 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
967 if (adapter->link_active == 0) {
968 em_get_speed_and_duplex(&adapter->hw,
969 &adapter->link_speed,
970 &adapter->link_duplex);
971 adapter->link_active = 1;
/* Link went away: clear the cached speed/duplex. */
974 if (adapter->link_active == 1) {
975 adapter->link_speed = 0;
976 adapter->link_duplex = 0;
977 adapter->link_active = 0;
981 ifmr->ifm_status = IFM_AVALID;
982 ifmr->ifm_active = IFM_ETHER;
984 if (!adapter->link_active)
987 ifmr->ifm_status |= IFM_ACTIVE;
/* Fiber parts always report 1000-SX full duplex. */
989 if (adapter->hw.media_type == em_media_type_fiber) {
990 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
992 switch (adapter->link_speed) {
994 ifmr->ifm_active |= IFM_10_T;
997 ifmr->ifm_active |= IFM_100_TX;
1000 ifmr->ifm_active |= IFM_1000_TX;
1003 if (adapter->link_duplex == FULL_DUPLEX)
1004 ifmr->ifm_active |= IFM_FDX;
1006 ifmr->ifm_active |= IFM_HDX;
1010 /*********************************************************************
1012 * Media Ioctl callback
1014 * This routine is called when the user changes speed/duplex using
1015 * media/mediopt option with ifconfig.
1017 **********************************************************************/
/*
 * NOTE(review): stray embedded line numbers; the "static int" line,
 * braces, case labels (IFM_AUTO / IFM_1000_TX|SX / IFM_100_TX /
 * IFM_10_T), break/else statements, the em_init call and the return
 * are missing — extraction damage; left byte-identical.
 */
1019 em_media_change(struct ifnet *ifp)
1021 struct adapter * adapter = ifp->if_softc;
1022 struct ifmedia *ifm = &adapter->media;
1024 INIT_DEBUGOUT("em_media_change: begin");
1026 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1029 switch (IFM_SUBTYPE(ifm->ifm_media)) {
/* autoselect: enable autoneg with the default advertisement mask */
1031 adapter->hw.autoneg = DO_AUTO_NEG;
1032 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
/* gigabit: autoneg, advertise 1000 full only */
1036 adapter->hw.autoneg = DO_AUTO_NEG;
1037 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
/* 100 Mb: forced speed, duplex from the GMASK bits */
1040 adapter->hw.autoneg = FALSE;
1041 adapter->hw.autoneg_advertised = 0;
1042 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1043 adapter->hw.forced_speed_duplex = em_100_full;
1045 adapter->hw.forced_speed_duplex = em_100_half;
/* 10 Mb: forced speed, duplex from the GMASK bits */
1048 adapter->hw.autoneg = FALSE;
1049 adapter->hw.autoneg_advertised = 0;
1050 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1051 adapter->hw.forced_speed_duplex = em_10_full;
1053 adapter->hw.forced_speed_duplex = em_10_half;
1056 if_printf(ifp, "Unsupported media type\n");
1059 * As the speed/duplex settings may have changed we need to
/* Allow the PHY to be reset on the next (re)init so the new
 * speed/duplex takes effect. */
1062 adapter->hw.phy_reset_disable = FALSE;
1070 em_tx_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize,
1073 struct em_q *q = arg;
1077 KASSERT(nsegs <= EM_MAX_SCATTER,
1078 ("Too many DMA segments returned when mapping tx packet"));
1080 bcopy(seg, q->segs, nsegs * sizeof(seg[0]));
/*
 * 82547 Tx FIFO workaround parameters (used by
 * em_82547_fifo_workaround / em_82547_update_fifo_head below).
 * Values are presumably byte counts within the on-chip Tx FIFO —
 * confirm against the 8254x developer's manual.  Stray embedded line
 * numbers removed.
 */
#define EM_FIFO_HDR			0x10
#define EM_82547_PKT_THRESH		0x3e0
#define EM_82547_TX_FIFO_SIZE		0x2800
#define EM_82547_TX_FIFO_BEGIN		0xf00
1087 /*********************************************************************
1089 * This routine maps the mbufs to tx descriptors.
1091 * return 0 on success, positive on failure
1092 **********************************************************************/
/*
 * em_encap() -- map one outbound mbuf chain (m_head) onto TX
 * descriptors and hand it to the hardware.  Handles:
 *   - on-demand cleanup when free descriptors fall to
 *     EM_TX_CLEANUP_THRESHOLD,
 *   - DMA map creation/loading via em_tx_cb(),
 *   - checksum-offload context setup (em_transmit_checksum_setup),
 *   - VLAN tag insertion,
 *   - the 82544-on-PCIX descriptor-splitting workaround
 *     (em_fill_descriptors), and
 *   - the 82547 half-duplex FIFO workaround (em_82547_move_tail).
 * Returns 0 on success, positive errno-style value on failure
 * (per the banner comment above).
 * NOTE(review): many interior lines (declarations of q/i/j/error,
 * braces, return statements) are missing from this extraction; do
 * not edit without the original file.
 */
1094 em_encap(struct adapter *adapter, struct mbuf *m_head)
1097 uint32_t txd_lower, txd_used = 0, txd_saved = 0;
1101 /* For 82544 Workaround */
1102 DESC_ARRAY desc_array;
1103 uint32_t array_elements;
1106 struct ifvlan *ifv = NULL;
1108 struct em_buffer *tx_buffer = NULL;
1109 struct em_tx_desc *current_tx_desc = NULL;
1110 struct ifnet *ifp = &adapter->interface_data.ac_if;
1113 * Force a cleanup if number of TX descriptors
1114 * available hits the threshold
1116 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1117 em_clean_transmit_interrupts(adapter);
/* Still starved after cleanup: count it and (presumably) bail out. */
1118 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1119 adapter->no_tx_desc_avail1++;
1124 * Map the packet for DMA.
1126 if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &q.map)) {
1127 adapter->no_tx_map_avail++;
1130 error = bus_dmamap_load_mbuf(adapter->txtag, q.map, m_head, em_tx_cb,
1131 &q, BUS_DMA_NOWAIT);
/* Load failed: account for it and release the map. */
1133 adapter->no_tx_dma_setup++;
1134 bus_dmamap_destroy(adapter->txtag, q.map);
1137 KASSERT(q.nsegs != 0, ("em_encap: empty packet"));
/* Not enough free descriptors for all segments: undo the mapping. */
1139 if (q.nsegs > adapter->num_tx_desc_avail) {
1140 adapter->no_tx_desc_avail2++;
1141 bus_dmamap_unload(adapter->txtag, q.map);
1142 bus_dmamap_destroy(adapter->txtag, q.map);
/* Checksum offload requested by the stack? */
1146 if (ifp->if_hwassist > 0) {
1147 em_transmit_checksum_setup(adapter, m_head,
1148 &txd_upper, &txd_lower);
1151 txd_upper = txd_lower = 0;
1153 /* Find out if we are in vlan mode */
1154 if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
1155 m_head->m_pkthdr.rcvif != NULL &&
1156 m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
1157 ifv = m_head->m_pkthdr.rcvif->if_softc;
1159 i = adapter->next_avail_tx_desc;
1160 if (adapter->pcix_82544) {
/* One pass per DMA segment. */
1164 for (j = 0; j < q.nsegs; j++) {
1165 /* If adapter is 82544 and on PCIX bus */
1166 if(adapter->pcix_82544) {
1168 address = htole64(q.segs[j].ds_addr);
1170 * Check the Address and Length combination and
1171 * split the data accordingly
1173 array_elements = em_fill_descriptors(address,
1174 htole32(q.segs[j].ds_len),
1176 for (counter = 0; counter < array_elements; counter++) {
/* Ran out of descriptors mid-packet: roll back to the saved index. */
1177 if (txd_used == adapter->num_tx_desc_avail) {
1178 adapter->next_avail_tx_desc = txd_saved;
1179 adapter->no_tx_desc_avail2++;
1180 bus_dmamap_unload(adapter->txtag, q.map);
1181 bus_dmamap_destroy(adapter->txtag, q.map);
1184 tx_buffer = &adapter->tx_buffer_area[i];
1185 current_tx_desc = &adapter->tx_desc_base[i];
1186 current_tx_desc->buffer_addr = htole64(
1187 desc_array.descriptor[counter].address);
1188 current_tx_desc->lower.data = htole32(
1189 (adapter->txd_cmd | txd_lower |
1190 (uint16_t)desc_array.descriptor[counter].length));
1191 current_tx_desc->upper.data = htole32((txd_upper));
/* Wrap the ring index. */
1192 if (++i == adapter->num_tx_desc)
1195 tx_buffer->m_head = NULL;
/* Normal (non-82544/PCIX) path: one descriptor per segment. */
1199 tx_buffer = &adapter->tx_buffer_area[i];
1200 current_tx_desc = &adapter->tx_desc_base[i];
1202 current_tx_desc->buffer_addr = htole64(q.segs[j].ds_addr);
1203 current_tx_desc->lower.data = htole32(
1204 adapter->txd_cmd | txd_lower | q.segs[j].ds_len);
1205 current_tx_desc->upper.data = htole32(txd_upper);
1207 if (++i == adapter->num_tx_desc)
1210 tx_buffer->m_head = NULL;
1214 adapter->next_avail_tx_desc = i;
1215 if (adapter->pcix_82544)
1216 adapter->num_tx_desc_avail -= txd_used;
1218 adapter->num_tx_desc_avail -= q.nsegs;
/* VLAN frame: program tag into the last descriptor (ifv set above). */
1221 /* Set the vlan id */
1222 current_tx_desc->upper.fields.special = htole16(ifv->ifv_tag);
1224 /* Tell hardware to add tag */
1225 current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
/* Only the last descriptor keeps the mbuf/map for later teardown. */
1228 tx_buffer->m_head = m_head;
1229 tx_buffer->map = q.map;
1230 bus_dmamap_sync(adapter->txtag, q.map, BUS_DMASYNC_PREWRITE);
1233 * Last Descriptor of Packet needs End Of Packet (EOP)
1235 current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);
1238 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
1239 * that this frame is available to transmit.
1241 if (adapter->hw.mac_type == em_82547 &&
1242 adapter->link_duplex == HALF_DUPLEX) {
/* 82547 half-duplex: defer the tail write to the FIFO workaround. */
1243 em_82547_move_tail(adapter);
1245 E1000_WRITE_REG(&adapter->hw, TDT, i);
1246 if (adapter->hw.mac_type == em_82547) {
1247 em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
1254 /*********************************************************************
1256 * 82547 workaround to avoid controller hang in half-duplex environment.
1257 * The workaround is to avoid queuing a large packet that would span
1258 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1259 * in this case. We do that only when FIFO is quiescent.
1261 **********************************************************************/
/*
 * em_82547_move_tail() -- deferred TDT (tail) advance for the 82547
 * half-duplex FIFO-hang workaround.  Walks the descriptors between
 * the hardware tail and the software tail, summing packet lengths;
 * if pushing those bytes would hit the FIFO boundary condition
 * (em_82547_fifo_workaround), it retries one tick later via
 * callout_reset() instead of writing TDT now.
 * Also used directly as a callout handler, hence the void *arg.
 * NOTE(review): declarations of hw_tdt/sw_tdt/eop and some braces
 * are missing from this extraction.
 */
1263 em_82547_move_tail(void *arg)
1266 struct adapter *adapter = arg;
1269 struct em_tx_desc *tx_desc;
1270 uint16_t length = 0;
1274 hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
1275 sw_tdt = adapter->next_avail_tx_desc;
/* Accumulate the length of every pending descriptor up to sw_tdt. */
1277 while (hw_tdt != sw_tdt) {
1278 tx_desc = &adapter->tx_desc_base[hw_tdt];
1279 length += tx_desc->lower.flags.length;
1280 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1281 if(++hw_tdt == adapter->num_tx_desc)
/* Would span the FIFO boundary: retry next tick, don't move TDT yet. */
1285 if (em_82547_fifo_workaround(adapter, length)) {
1286 adapter->tx_fifo_wrk++;
1287 callout_reset(&adapter->tx_fifo_timer, 1,
1288 em_82547_move_tail, adapter);
/* Safe: advance the hardware tail and account for the FIFO usage. */
1291 E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
1292 em_82547_update_fifo_head(adapter, length);
/*
 * em_82547_fifo_workaround() -- decide whether queuing `len` more
 * bytes would trip the 82547 half-duplex TX FIFO hang.  Rounds the
 * packet up to the 16-byte FIFO granularity (plus header) and, in
 * half duplex, checks it against the space left before the FIFO
 * wrap; attempts a FIFO reset when the threshold is exceeded.
 * NOTE(review): return statements/braces are missing from this
 * extraction -- presumably returns nonzero when the caller must
 * defer the TDT write; verify against the original.
 */
1300 em_82547_fifo_workaround(struct adapter *adapter, int len)
1302 int fifo_space, fifo_pkt_len;
1304 fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
/* Workaround only matters in half duplex. */
1306 if (adapter->link_duplex == HALF_DUPLEX) {
1307 fifo_space = EM_82547_TX_FIFO_SIZE - adapter->tx_fifo_head;
1309 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1310 if (em_82547_tx_fifo_reset(adapter))
/*
 * em_82547_update_fifo_head() -- advance the driver's shadow of the
 * 82547 TX FIFO head by `len` bytes (rounded up to the 16-byte FIFO
 * granularity plus header), wrapping at the FIFO size.
 */
1321 em_82547_update_fifo_head(struct adapter *adapter, int len)
1323 int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1325 /* tx_fifo_head is always 16 byte aligned */
1326 adapter->tx_fifo_head += fifo_pkt_len;
1327 if (adapter->tx_fifo_head >= EM_82547_TX_FIFO_SIZE)
1328 adapter->tx_fifo_head -= EM_82547_TX_FIFO_SIZE;
/*
 * em_82547_tx_fifo_reset() -- reset the 82547 internal TX FIFO
 * pointers, but ONLY when the FIFO is quiescent: descriptor ring
 * head == tail, FIFO head == tail, saved FIFO head == tail, and the
 * FIFO packet count (TDFPC) is zero.  The TX unit is disabled around
 * the pointer rewrite and re-enabled afterwards.
 * NOTE(review): the declaration of tctl and the return statements
 * are missing from this extraction -- presumably returns nonzero on
 * a successful reset; verify against the original.
 */
1332 em_82547_tx_fifo_reset(struct adapter *adapter)
/* All four quiescence conditions must hold before touching the FIFO. */
1336 if ( (E1000_READ_REG(&adapter->hw, TDT) ==
1337 E1000_READ_REG(&adapter->hw, TDH)) &&
1338 (E1000_READ_REG(&adapter->hw, TDFT) ==
1339 E1000_READ_REG(&adapter->hw, TDFH)) &&
1340 (E1000_READ_REG(&adapter->hw, TDFTS) ==
1341 E1000_READ_REG(&adapter->hw, TDFHS)) &&
1342 (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {
1344 /* Disable TX unit */
1345 tctl = E1000_READ_REG(&adapter->hw, TCTL);
1346 E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);
1348 /* Reset FIFO pointers */
1349 E1000_WRITE_REG(&adapter->hw, TDFT, EM_82547_TX_FIFO_BEGIN);
1350 E1000_WRITE_REG(&adapter->hw, TDFH, EM_82547_TX_FIFO_BEGIN);
1351 E1000_WRITE_REG(&adapter->hw, TDFTS, EM_82547_TX_FIFO_BEGIN);
1352 E1000_WRITE_REG(&adapter->hw, TDFHS, EM_82547_TX_FIFO_BEGIN);
1354 /* Re-enable TX unit */
1355 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1356 E1000_WRITE_FLUSH(&adapter->hw);
/* Shadow head restarts at zero; count the reset for statistics. */
1358 adapter->tx_fifo_head = 0;
1359 adapter->tx_fifo_reset++;
/*
 * em_set_promisc() -- program the receive control register (RCTL)
 * to match the interface's IFF_PROMISC / IFF_ALLMULTI flags:
 * promiscuous sets both unicast (UPE) and multicast (MPE)
 * promiscuous bits; allmulti sets only MPE.
 */
1369 em_set_promisc(struct adapter *adapter)
1372 struct ifnet *ifp = &adapter->interface_data.ac_if;
1374 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1376 if (ifp->if_flags & IFF_PROMISC) {
1377 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1378 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1379 } else if (ifp->if_flags & IFF_ALLMULTI) {
1380 reg_rctl |= E1000_RCTL_MPE;
1381 reg_rctl &= ~E1000_RCTL_UPE;
1382 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
/*
 * em_disable_promisc() -- clear both promiscuous bits (unicast UPE
 * and multicast MPE) in RCTL, returning the receiver to normal
 * address filtering.
 */
1387 em_disable_promisc(struct adapter *adapter)
1391 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1393 reg_rctl &= (~E1000_RCTL_UPE);
1394 reg_rctl &= (~E1000_RCTL_MPE);
1395 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1398 /*********************************************************************
1401 * This routine is called whenever multicast address list is updated.
1403 **********************************************************************/
/*
 * em_set_multi() -- rebuild the hardware multicast filter from the
 * interface's multicast address list (called whenever that list
 * changes).  Collects up to MAX_NUM_MULTICAST_ADDRESSES AF_LINK
 * addresses into a flat byte array and hands it to
 * em_mc_addr_list_update(); overflow falls back to multicast
 * promiscuous (MPE).  The 82542 rev2.0 requires the receiver to be
 * held in reset (RCTL_RST) -- with MWI disabled -- around the update.
 * NOTE(review): the mcnt declaration/initialization and several
 * braces are missing from this extraction.
 */
1406 em_set_multi(struct adapter *adapter)
1408 uint32_t reg_rctl = 0;
1409 uint8_t mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
1410 struct ifmultiaddr *ifma;
1412 struct ifnet *ifp = &adapter->interface_data.ac_if;
1414 IOCTL_DEBUGOUT("em_set_multi: begin");
/* 82542 rev2.0 workaround: reset receiver (and drop MWI) first. */
1416 if (adapter->hw.mac_type == em_82542_rev2_0) {
1417 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1418 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1419 em_pci_clear_mwi(&adapter->hw);
1420 reg_rctl |= E1000_RCTL_RST;
1421 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
/* Gather link-layer multicast addresses into the flat mta[] table. */
1425 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1426 if (ifma->ifma_addr->sa_family != AF_LINK)
1429 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1432 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1433 &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
/* Too many groups for the hardware filter: accept all multicast. */
1437 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1438 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1439 reg_rctl |= E1000_RCTL_MPE;
1440 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1442 em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);
/* 82542 rev2.0: take the receiver back out of reset, restore MWI. */
1444 if (adapter->hw.mac_type == em_82542_rev2_0) {
1445 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1446 reg_rctl &= ~E1000_RCTL_RST;
1447 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1449 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1450 em_pci_set_mwi(&adapter->hw);
1454 /*********************************************************************
1457 * This routine checks for link status and updates statistics.
1459 **********************************************************************/
/*
 * em_local_timer() -- periodic (2*hz) housekeeping callout: checks
 * link state, prints link transitions, updates statistics counters,
 * optionally dumps debug stats, runs the SmartSpeed workaround, and
 * re-arms itself.
 * NOTE(review): the ifp declaration and locking lines (if any) are
 * missing from this extraction.
 */
1462 em_local_timer(void *arg)
1466 struct adapter *adapter = arg;
1467 ifp = &adapter->interface_data.ac_if;
1471 em_check_for_link(&adapter->hw);
1472 em_print_link_status(adapter);
1473 em_update_stats_counters(adapter);
/* Debug stats only when enabled AND the interface is running. */
1474 if (em_display_debug_stats && ifp->if_flags & IFF_RUNNING)
1475 em_print_hw_stats(adapter);
1476 em_smartspeed(adapter);
/* Re-arm: fire again in two seconds. */
1478 callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
/*
 * em_print_link_status() -- edge-triggered link reporting.  Reads
 * the STATUS register; on a down->up transition fetches and prints
 * speed/duplex and resets the SmartSpeed counter, on an up->down
 * transition zeroes the cached speed/duplex and reports link down.
 * No output when the state is unchanged.
 */
1484 em_print_link_status(struct adapter *adapter)
1486 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
/* Link just came up. */
1487 if (adapter->link_active == 0) {
1488 em_get_speed_and_duplex(&adapter->hw,
1489 &adapter->link_speed,
1490 &adapter->link_duplex);
1491 device_printf(adapter->dev, "Link is up %d Mbps %s\n",
1492 adapter->link_speed,
1493 ((adapter->link_duplex == FULL_DUPLEX) ?
1494 "Full Duplex" : "Half Duplex"));
1495 adapter->link_active = 1;
1496 adapter->smartspeed = 0;
/* Link just went down. */
1499 if (adapter->link_active == 1) {
1500 adapter->link_speed = 0;
1501 adapter->link_duplex = 0;
1502 device_printf(adapter->dev, "Link is Down\n");
1503 adapter->link_active = 0;
1508 /*********************************************************************
1510 * This routine disables all traffic on the adapter by issuing a
1511 * global reset on the MAC and deallocates TX/RX buffers.
1513 **********************************************************************/
/*
 * em_stop() body (the function signature line is missing from this
 * extraction -- see the banner comment above): halts all traffic by
 * disabling interrupts, issuing a global MAC reset, stopping the
 * housekeeping and FIFO-workaround callouts, freeing TX/RX
 * structures, and marking the interface down for the stack.
 */
1519 struct adapter * adapter = arg;
1520 ifp = &adapter->interface_data.ac_if;
1522 INIT_DEBUGOUT("em_stop: begin");
1523 em_disable_intr(adapter);
1524 em_reset_hw(&adapter->hw);
1525 callout_stop(&adapter->timer);
1526 callout_stop(&adapter->tx_fifo_timer);
1527 em_free_transmit_structures(adapter);
1528 em_free_receive_structures(adapter);
1530 /* Tell the stack that the interface is no longer active */
1531 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1535 /*********************************************************************
1537 * Determine hardware revision.
1539 **********************************************************************/
/*
 * em_identify_hardware() -- read PCI config space to identify the
 * board: ensures bus-master and memory-access bits are set (fixing
 * them up if not), records vendor/device/revision/subsystem IDs,
 * resolves the MAC type via em_set_mac_type(), and enables the PHY
 * init script on 82541/82547 families.
 */
1541 em_identify_hardware(struct adapter * adapter)
1543 device_t dev = adapter->dev;
1545 /* Make sure our PCI config space has the necessary stuff set */
1546 adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1547 if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1548 (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1549 device_printf(dev, "Memory Access and/or Bus Master bits were not set!\n");
/* Repair the command word ourselves rather than failing attach. */
1550 adapter->hw.pci_cmd_word |=
1551 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1552 pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
1555 /* Save off the information about this board */
1556 adapter->hw.vendor_id = pci_get_vendor(dev);
1557 adapter->hw.device_id = pci_get_device(dev);
1558 adapter->hw.revision_id = pci_get_revid(dev);
1559 adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev);
1560 adapter->hw.subsystem_id = pci_get_subdevice(dev);
1562 /* Identify the MAC */
1563 if (em_set_mac_type(&adapter->hw))
1564 device_printf(dev, "Unknown MAC Type\n");
/* These MACs need the shared-code PHY init script during reset. */
1566 if (adapter->hw.mac_type == em_82541 ||
1567 adapter->hw.mac_type == em_82541_rev_2 ||
1568 adapter->hw.mac_type == em_82547 ||
1569 adapter->hw.mac_type == em_82547_rev_2)
1570 adapter->hw.phy_init_script = TRUE;
1573 /*********************************************************************
1575 * Initialize the hardware to a configuration as specified by the
1576 * adapter structure. The controller is reset, the EEPROM is
1577 * verified, the MAC address is set, then the shared initialization
1578 * routines are called.
1580 **********************************************************************/
/*
 * em_hardware_init() -- bring the controller to a known-good state:
 * global reset, EEPROM checksum validation, part-number read,
 * shared-code initialization (em_init_hw), then an initial link
 * check that seeds link_active and the cached speed/duplex.
 * NOTE(review): the error-return statements after each failing
 * device_printf() are missing from this extraction.
 */
1582 em_hardware_init(struct adapter *adapter)
1584 INIT_DEBUGOUT("em_hardware_init: begin");
1585 /* Issue a global reset */
1586 em_reset_hw(&adapter->hw);
1588 /* When hardware is reset, fifo_head is also reset */
1589 adapter->tx_fifo_head = 0;
1591 /* Make sure we have a good EEPROM before we read from it */
1592 if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
1593 device_printf(adapter->dev, "The EEPROM Checksum Is Not Valid\n");
1597 if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
1598 device_printf(adapter->dev, "EEPROM read error while reading part number\n");
1602 if (em_init_hw(&adapter->hw) < 0) {
1603 device_printf(adapter->dev, "Hardware Initialization Failed");
/* Seed initial link state from the STATUS register. */
1607 em_check_for_link(&adapter->hw);
1608 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
1609 adapter->link_active = 1;
1611 adapter->link_active = 0;
1613 if (adapter->link_active) {
1614 em_get_speed_and_duplex(&adapter->hw,
1615 &adapter->link_speed,
1616 &adapter->link_duplex);
1618 adapter->link_speed = 0;
1619 adapter->link_duplex = 0;
1625 /*********************************************************************
1627 * Setup networking device structure and register an interface.
1629 **********************************************************************/
/*
 * em_setup_interface() -- populate the ifnet structure (MTU, flags,
 * entry points, queue length), attach it to the ethernet layer,
 * advertise checksum/VLAN capabilities on >= 82543 MACs, and build
 * the ifmedia list: fiber gets 1000_SX entries, copper gets the
 * full 10/100/1000 matrix; both end with autoselect as the default.
 * NOTE(review): the ifp declaration and some ifmedia_add argument
 * lines are missing from this extraction.
 */
1631 em_setup_interface(device_t dev, struct adapter *adapter)
1634 INIT_DEBUGOUT("em_setup_interface: begin");
1636 ifp = &adapter->interface_data.ac_if;
1637 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1638 ifp->if_mtu = ETHERMTU;
1639 ifp->if_baudrate = 1000000000;
1640 ifp->if_init = em_init;
1641 ifp->if_softc = adapter;
1642 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1643 ifp->if_ioctl = em_ioctl;
1644 ifp->if_start = em_start;
1645 ifp->if_watchdog = em_watchdog;
1646 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;
1648 ether_ifattach(ifp, adapter->hw.mac_addr);
/* Hardware checksum offload only exists on 82543 and newer. */
1650 if (adapter->hw.mac_type >= em_82543) {
1651 ifp->if_capabilities = IFCAP_HWCSUM;
1652 ifp->if_capenable = ifp->if_capabilities;
1656 * Tell the upper layer(s) we support long frames.
1658 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1659 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1662 * Specify the media types supported by this adapter and register
1663 * callbacks to update media and link information
1665 ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
1667 if (adapter->hw.media_type == em_media_type_fiber) {
1668 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
1670 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
/* Copper: advertise the full 10/100/1000 half/full-duplex matrix. */
1673 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1674 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
1676 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
1678 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
1680 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_TX | IFM_FDX,
1682 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_TX, 0, NULL);
1684 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1685 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1688 /*********************************************************************
1690 * Workaround for SmartSpeed on 82541 and 82547 controllers
1692 **********************************************************************/
/*
 * em_smartspeed() -- SmartSpeed workaround for IGP PHYs (82541 and
 * 82547): when 1000baseT autonegotiation repeatedly fails with a
 * Master/Slave configuration fault, temporarily drop the local
 * master/slave manual-enable bit and restart autonegotiation; after
 * EM_SMARTSPEED_DOWNSHIFT unsuccessful timer ticks, re-enable it
 * (2/3-pair-cable case) and restart again.  Driven from
 * em_local_timer(); a no-op when link is up, the PHY is not IGP,
 * autoneg is off, or 1000FD is not advertised.
 * NOTE(review): the phy_tmp declaration and some braces are missing
 * from this extraction.
 */
1694 em_smartspeed(struct adapter *adapter)
/* Nothing to do unless we are an IGP PHY autonegotiating 1000FD with no link. */
1698 if (adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
1699 !adapter->hw.autoneg ||
1700 !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
1703 if (adapter->smartspeed == 0) {
1705 * If Master/Slave config fault is asserted twice,
1706 * we assume back-to-back.
1708 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1709 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
1711 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1712 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
1713 em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
/* Fault confirmed: clear manual master/slave and renegotiate. */
1715 if (phy_tmp & CR_1000T_MS_ENABLE) {
1716 phy_tmp &= ~CR_1000T_MS_ENABLE;
1717 em_write_phy_reg(&adapter->hw,
1718 PHY_1000T_CTRL, phy_tmp);
1719 adapter->smartspeed++;
1720 if (adapter->hw.autoneg &&
1721 !em_phy_setup_autoneg(&adapter->hw) &&
1722 !em_read_phy_reg(&adapter->hw, PHY_CTRL,
1724 phy_tmp |= (MII_CR_AUTO_NEG_EN |
1725 MII_CR_RESTART_AUTO_NEG);
1726 em_write_phy_reg(&adapter->hw,
1732 } else if (adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
1733 /* If still no link, perhaps using 2/3 pair cable */
1734 em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
1735 phy_tmp |= CR_1000T_MS_ENABLE;
1736 em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
1737 if (adapter->hw.autoneg &&
1738 !em_phy_setup_autoneg(&adapter->hw) &&
1739 !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
1740 phy_tmp |= (MII_CR_AUTO_NEG_EN |
1741 MII_CR_RESTART_AUTO_NEG);
1742 em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
1745 /* Restart process after EM_SMARTSPEED_MAX iterations */
1746 if (adapter->smartspeed++ == EM_SMARTSPEED_MAX)
1747 adapter->smartspeed = 0;
1751 * Manage DMA'able memory.
/*
 * em_dmamap_cb() -- generic single-segment bus_dmamap_load()
 * callback: stores the first segment's bus address into the
 * caller-supplied bus_addr_t pointed to by arg.
 * NOTE(review): braces and any error/nseg checks are missing from
 * this extraction.
 */
1754 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1758 *(bus_addr_t*) arg = segs->ds_addr;
/*
 * em_dma_malloc() -- allocate `size` bytes of DMA-able, page-aligned
 * memory for a descriptor ring or similar: creates a dedicated DMA
 * tag, allocates and maps the memory, and loads the map (resolving
 * the bus address via em_dmamap_cb into dma->dma_paddr).  On any
 * failure the goto-style cleanup below unwinds whatever was
 * acquired.  Caller frees with em_dma_free().
 * NOTE(review): several argument lines (nsegments, tag/map
 * destinations, error labels) are missing from this extraction.
 * NOTE(review): the "%llu" format with a (uintmax_t) cast below is
 * mismatched -- %ju would be correct -- but left untouched here.
 */
1762 em_dma_malloc(struct adapter *adapter, bus_size_t size,
1763 struct em_dma_alloc *dma, int mapflags)
1766 device_t dev = adapter->dev;
1768 r = bus_dma_tag_create(NULL, /* parent */
1769 PAGE_SIZE, 0, /* alignment, bounds */
1770 BUS_SPACE_MAXADDR, /* lowaddr */
1771 BUS_SPACE_MAXADDR, /* highaddr */
1772 NULL, NULL, /* filter, filterarg */
1775 size, /* maxsegsize */
1776 BUS_DMA_ALLOCNOW, /* flags */
1779 device_printf(dev, "em_dma_malloc: bus_dma_tag_create failed; "
1784 r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
1785 BUS_DMA_NOWAIT, &dma->dma_map);
1787 device_printf(dev, "em_dma_malloc: bus_dmammem_alloc failed; "
1788 "size %llu, error %d\n", (uintmax_t)size, r);
1792 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1796 mapflags | BUS_DMA_NOWAIT);
1798 device_printf(dev, "em_dma_malloc: bus_dmamap_load failed; "
1803 dma->dma_size = size;
/* Error unwind: release whatever was acquired, in reverse order. */
1807 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1809 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1810 bus_dma_tag_destroy(dma->dma_tag);
1812 dma->dma_map = NULL;
1813 dma->dma_tag = NULL;
/*
 * em_dma_free() -- release a DMA area obtained from em_dma_malloc():
 * unload the map, free the memory, and destroy the tag.
 */
1818 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
1820 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1821 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1822 bus_dma_tag_destroy(dma->dma_tag);
1825 /*********************************************************************
1827 * Allocate memory for tx_buffer structures. The tx_buffer stores all
1828 * the information needed to transmit a packet on the wire.
1830 **********************************************************************/
/*
 * em_allocate_transmit_structures() -- allocate the zeroed
 * per-descriptor em_buffer array (one entry per TX descriptor)
 * used to track mbufs and DMA maps in flight.
 * NOTE(review): the return statements are missing from this
 * extraction -- presumably 0 on success, errno on failure.
 */
1832 em_allocate_transmit_structures(struct adapter * adapter)
1834 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
1835 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
1836 if (adapter->tx_buffer_area == NULL) {
1837 device_printf(adapter->dev, "Unable to allocate tx_buffer memory\n");
1844 /*********************************************************************
1846 * Allocate and initialize transmit structures.
1848 **********************************************************************/
/*
 * em_setup_transmit_structures() -- create the TX DMA tag (up to
 * EM_MAX_SCATTER segments, MCLBYTES*8 max size), allocate the
 * tx_buffer array, zero the descriptor ring, and reset the ring
 * indices, available-descriptor count, and checksum-offload context.
 * NOTE(review): the tag-destination argument and return statements
 * are missing from this extraction.
 */
1850 em_setup_transmit_structures(struct adapter * adapter)
1853 * Setup DMA descriptor areas.
1855 if (bus_dma_tag_create(NULL, /* parent */
1856 1, 0, /* alignment, bounds */
1857 BUS_SPACE_MAXADDR, /* lowaddr */
1858 BUS_SPACE_MAXADDR, /* highaddr */
1859 NULL, NULL, /* filter, filterarg */
1860 MCLBYTES * 8, /* maxsize */
1861 EM_MAX_SCATTER, /* nsegments */
1862 MCLBYTES * 8, /* maxsegsize */
1863 BUS_DMA_ALLOCNOW, /* flags */
1865 device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
1869 if (em_allocate_transmit_structures(adapter))
/* Start from a clean descriptor ring. */
1872 bzero((void *) adapter->tx_desc_base,
1873 (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
1875 adapter->next_avail_tx_desc = 0;
1876 adapter->oldest_used_tx_desc = 0;
1878 /* Set number of descriptors available */
1879 adapter->num_tx_desc_avail = adapter->num_tx_desc;
1881 /* Set checksum context */
1882 adapter->active_checksum_context = OFFLOAD_NONE;
1887 /*********************************************************************
1889 * Enable transmit unit.
1891 **********************************************************************/
/*
 * em_initialize_transmit_unit() -- program the hardware TX unit:
 * descriptor ring base/length (TDBAL/TDBAH/TDLEN), head/tail
 * pointers (TDH/TDT), inter-packet gap (TIPG, per MAC generation
 * and media), interrupt delay timers (TIDV, TADV on >= 82540), the
 * transmit control register (TCTL, with duplex-dependent collision
 * distance), and the base descriptor command bits (txd_cmd).
 * NOTE(review): declarations of reg_tctl/bus_addr and the default
 * switch case line are missing from this extraction.
 */
1893 em_initialize_transmit_unit(struct adapter * adapter)
1896 uint32_t reg_tipg = 0;
1899 INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
1901 /* Setup the Base and Length of the Tx Descriptor Ring */
1902 bus_addr = adapter->txdma.dma_paddr;
1903 E1000_WRITE_REG(&adapter->hw, TDBAL, (uint32_t)bus_addr);
1904 E1000_WRITE_REG(&adapter->hw, TDBAH, (uint32_t)(bus_addr >> 32));
1905 E1000_WRITE_REG(&adapter->hw, TDLEN,
1906 adapter->num_tx_desc * sizeof(struct em_tx_desc));
1908 /* Setup the HW Tx Head and Tail descriptor pointers */
1909 E1000_WRITE_REG(&adapter->hw, TDH, 0);
1910 E1000_WRITE_REG(&adapter->hw, TDT, 0);
1912 HW_DEBUGOUT2("Base = %x, Length = %x\n",
1913 E1000_READ_REG(&adapter->hw, TDBAL),
1914 E1000_READ_REG(&adapter->hw, TDLEN));
1916 /* Set the default values for the Tx Inter Packet Gap timer */
1917 switch (adapter->hw.mac_type) {
1918 case em_82542_rev2_0:
1919 case em_82542_rev2_1:
1920 reg_tipg = DEFAULT_82542_TIPG_IPGT;
1921 reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
1922 reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
/* Later MACs: IPGT differs between fiber and copper. */
1925 if (adapter->hw.media_type == em_media_type_fiber)
1926 reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1928 reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1929 reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
1930 reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
1933 E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
1934 E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
1935 if (adapter->hw.mac_type >= em_82540)
1936 E1000_WRITE_REG(&adapter->hw, TADV,
1937 adapter->tx_abs_int_delay.value);
1939 /* Program the Transmit Control Register */
1940 reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
1941 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1942 if (adapter->link_duplex == 1)
1943 reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
1945 reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
1946 E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
1948 /* Setup Transmit Descriptor Settings for this adapter */
1949 adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;
/* Interrupt-delay enable bit only when a TX delay is configured. */
1951 if (adapter->tx_int_delay.value > 0)
1952 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1955 /*********************************************************************
1957 * Free all transmit related data structures.
1959 **********************************************************************/
/*
 * em_free_transmit_structures() -- tear down all TX state: for every
 * tx_buffer still holding an mbuf, unload/destroy its DMA map and
 * free the mbuf; then free the tx_buffer array and destroy the TX
 * DMA tag.  Safe to call when parts were never allocated (NULL
 * checks throughout).
 */
1961 em_free_transmit_structures(struct adapter * adapter)
1963 struct em_buffer *tx_buffer;
1966 INIT_DEBUGOUT("free_transmit_structures: begin");
1968 if (adapter->tx_buffer_area != NULL) {
1969 tx_buffer = adapter->tx_buffer_area;
1970 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
/* Only entries with an in-flight mbuf own a DMA map to release. */
1971 if (tx_buffer->m_head != NULL) {
1972 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1973 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1974 m_freem(tx_buffer->m_head);
1976 tx_buffer->m_head = NULL;
1979 if (adapter->tx_buffer_area != NULL) {
1980 free(adapter->tx_buffer_area, M_DEVBUF);
1981 adapter->tx_buffer_area = NULL;
1983 if (adapter->txtag != NULL) {
1984 bus_dma_tag_destroy(adapter->txtag);
1985 adapter->txtag = NULL;
1989 /*********************************************************************
1991 * The offload context needs to be set when we transfer the first
1992 * packet of a particular protocol (TCP/UDP). We change the
1993 * context only if the protocol type changes.
1995 **********************************************************************/
/*
 * em_transmit_checksum_setup() -- configure TX checksum offload for
 * one packet.  Sets *txd_upper/*txd_lower to the descriptor bits
 * needed for TCP or UDP offload; if the required offload context
 * (TCP/IP vs UDP/IP) matches the currently programmed one, returns
 * early without consuming a descriptor.  Otherwise it writes a new
 * context descriptor (fixed IPv4 + TCP/UDP header offsets based on
 * ETHER_HDR_LEN and struct ip) into the ring and advances
 * next_avail_tx_desc.
 * NOTE(review): the mp parameter line, curr_txd declaration, early
 * returns, and the no-csum-flags else-branch are missing from this
 * extraction.
 */
1997 em_transmit_checksum_setup(struct adapter * adapter,
1999 uint32_t *txd_upper,
2000 uint32_t *txd_lower)
2002 struct em_context_desc *TXD;
2003 struct em_buffer *tx_buffer;
2006 if (mp->m_pkthdr.csum_flags) {
2007 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2008 *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2009 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
/* Context already programmed for TCP/IP: reuse it. */
2010 if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
2013 adapter->active_checksum_context = OFFLOAD_TCP_IP;
2014 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2015 *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2016 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2017 if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
2020 adapter->active_checksum_context = OFFLOAD_UDP_IP;
2032 /* If we reach this point, the checksum offload context
2033 * needs to be reset.
2035 curr_txd = adapter->next_avail_tx_desc;
2036 tx_buffer = &adapter->tx_buffer_area[curr_txd];
2037 TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
/* IPv4 header offsets: assumes a plain Ethernet + IPv4 frame. */
2039 TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2040 TXD->lower_setup.ip_fields.ipcso =
2041 ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2042 TXD->lower_setup.ip_fields.ipcse =
2043 htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2045 TXD->upper_setup.tcp_fields.tucss =
2046 ETHER_HDR_LEN + sizeof(struct ip);
2047 TXD->upper_setup.tcp_fields.tucse = htole16(0);
/* Checksum field offset differs between TCP and UDP. */
2049 if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
2050 TXD->upper_setup.tcp_fields.tucso =
2051 ETHER_HDR_LEN + sizeof(struct ip) +
2052 offsetof(struct tcphdr, th_sum);
2053 } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
2054 TXD->upper_setup.tcp_fields.tucso =
2055 ETHER_HDR_LEN + sizeof(struct ip) +
2056 offsetof(struct udphdr, uh_sum);
2059 TXD->tcp_seg_setup.data = htole32(0);
2060 TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
2062 tx_buffer->m_head = NULL;
/* Context descriptor consumed one ring slot. */
2064 if (++curr_txd == adapter->num_tx_desc)
2067 adapter->num_tx_desc_avail--;
2068 adapter->next_avail_tx_desc = curr_txd;
2071 /**********************************************************************
2073 * Examine each tx_buffer in the used queue. If the hardware is done
2074 * processing the packet then free associated resources. The
2075 * tx_buffer is put back on the free queue.
2077 **********************************************************************/
/*
 * em_clean_transmit_interrupts() -- reclaim completed TX
 * descriptors.  Walks forward from oldest_used_tx_desc while the
 * hardware has set the Descriptor Done (DD) status bit, syncing,
 * unloading and destroying each buffer's DMA map and freeing its
 * mbuf.  Afterwards, clears IFF_OACTIVE when enough room has been
 * freed and manages the watchdog timer per the comment below.
 * NOTE(review): declarations of num_avail/i and the timer-clear
 * statement are missing from this extraction.
 */
2080 em_clean_transmit_interrupts(struct adapter *adapter)
2084 struct em_buffer *tx_buffer;
2085 struct em_tx_desc *tx_desc;
2086 struct ifnet *ifp = &adapter->interface_data.ac_if;
/* Ring already empty: nothing to reclaim. */
2088 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2093 adapter->clean_tx_interrupts++;
2095 num_avail = adapter->num_tx_desc_avail;
2096 i = adapter->oldest_used_tx_desc;
2098 tx_buffer = &adapter->tx_buffer_area[i];
2099 tx_desc = &adapter->tx_desc_base[i];
/* DD set by hardware means this descriptor has completed. */
2101 while(tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2102 tx_desc->upper.data = 0;
/* Only the packet's last descriptor carries the mbuf and map. */
2105 if (tx_buffer->m_head) {
2107 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2108 BUS_DMASYNC_POSTWRITE);
2109 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2110 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2112 m_freem(tx_buffer->m_head);
2113 tx_buffer->m_head = NULL;
2116 if (++i == adapter->num_tx_desc)
2119 tx_buffer = &adapter->tx_buffer_area[i];
2120 tx_desc = &adapter->tx_desc_base[i];
2123 adapter->oldest_used_tx_desc = i;
2126 * If we have enough room, clear IFF_OACTIVE to tell the stack
2127 * that it is OK to send packets.
2128 * If there are no pending descriptors, clear the timeout. Otherwise,
2129 * if some descriptors have been freed, restart the timeout.
2131 if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
2132 ifp->if_flags &= ~IFF_OACTIVE;
2133 if (num_avail == adapter->num_tx_desc)
2135 else if (num_avail == adapter->num_tx_desc_avail)
2136 ifp->if_timer = EM_TX_TIMEOUT;
2138 adapter->num_tx_desc_avail = num_avail;
2142 /*********************************************************************
2144 * Get a buffer from system mbuf buffer pool.
2146 **********************************************************************/
/*
 * em_get_buf() -- install a receive mbuf cluster in RX ring slot i.
 * If nmp is NULL a fresh packet-header cluster is allocated with
 * m_getcl(how); otherwise the caller's mbuf is recycled (data
 * pointer reset to the cluster start).  On standard-MTU interfaces
 * the payload is shifted by ETHER_ALIGN to align the IP header.
 * The cluster is then DMA-loaded (address resolved via
 * em_dmamap_cb) and its bus address written into the RX descriptor.
 * NOTE(review): declarations of error/paddr/ifp, the allocation-
 * failure return, and the load-error path are missing from this
 * extraction.
 */
2148 em_get_buf(int i, struct adapter *adapter, struct mbuf *nmp, int how)
2150 struct mbuf *mp = nmp;
2151 struct em_buffer *rx_buffer;
2156 ifp = &adapter->interface_data.ac_if;
/* No mbuf supplied: allocate a new cluster. */
2159 mp = m_getcl(how, MT_DATA, M_PKTHDR);
2161 adapter->mbuf_cluster_failed++;
2164 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
/* Recycled mbuf: reset length and data pointer to cluster start. */
2166 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2167 mp->m_data = mp->m_ext.ext_buf;
/* Align the IP header on standard-MTU interfaces. */
2170 if (ifp->if_mtu <= ETHERMTU)
2171 m_adj(mp, ETHER_ALIGN);
2173 rx_buffer = &adapter->rx_buffer_area[i];
2176 * Using memory from the mbuf cluster pool, invoke the
2177 * bus_dma machinery to arrange the memory mapping.
2179 error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
2180 mtod(mp, void *), mp->m_len,
2181 em_dmamap_cb, &paddr, 0);
2186 rx_buffer->m_head = mp;
2187 adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
2188 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
2193 /*********************************************************************
2195 * Allocate memory for rx_buffer structures. Since we use one
2196 * rx_buffer per received packet, the maximum number of rx_buffer's
2197 * that we'll need is equal to the number of receive descriptors
2198 * that we've allocated.
2200 **********************************************************************/
/*
 * em_allocate_receive_structures() -- allocate per-descriptor RX
 * state: the zeroed rx_buffer array (one per RX descriptor), the RX
 * DMA tag (single MCLBYTES segment), a DMA map per buffer, and an
 * initial mbuf cluster for each slot via em_get_buf().  The cleanup
 * path at the bottom unwinds on failure.
 * NOTE(review): declarations of i/error/size destinations, some
 * loop-exit/goto lines, and the success return are missing from
 * this extraction.
 */
2202 em_allocate_receive_structures(struct adapter *adapter)
2205 struct em_buffer *rx_buffer;
2207 size = adapter->num_rx_desc * sizeof(struct em_buffer);
2208 adapter->rx_buffer_area = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
2210 error = bus_dma_tag_create(NULL, /* parent */
2211 1, 0, /* alignment, bounds */
2212 BUS_SPACE_MAXADDR, /* lowaddr */
2213 BUS_SPACE_MAXADDR, /* highaddr */
2214 NULL, NULL, /* filter, filterarg */
2215 MCLBYTES, /* maxsize */
2217 MCLBYTES, /* maxsegsize */
2218 BUS_DMA_ALLOCNOW, /* flags */
2221 device_printf(adapter->dev, "em_allocate_receive_structures: "
2222 "bus_dma_tag_create failed; error %u\n", error);
/* One DMA map per receive buffer slot. */
2226 rx_buffer = adapter->rx_buffer_area;
2227 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2228 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2231 device_printf(adapter->dev,
2232 "em_allocate_receive_structures: "
2233 "bus_dmamap_create failed; error %u\n",
/* Pre-fill every slot with an mbuf cluster. */
2239 for (i = 0; i < adapter->num_rx_desc; i++) {
2240 error = em_get_buf(i, adapter, NULL, MB_WAIT);
2242 adapter->rx_buffer_area[i].m_head = NULL;
2243 adapter->rx_desc_base[i].buffer_addr = 0;
/* Failure unwind: destroy the tag and free the buffer array. */
2251 bus_dma_tag_destroy(adapter->rxtag);
2253 adapter->rxtag = NULL;
2254 free(adapter->rx_buffer_area, M_DEVBUF);
2255 adapter->rx_buffer_area = NULL;
2259 /*********************************************************************
2261 * Allocate and initialize receive structures.
2263 **********************************************************************/
/*
 * em_setup_receive_structures() -- initialize RX state: zero the
 * descriptor ring, allocate the receive buffers/maps, and reset the
 * software ring cursor.
 * NOTE(review): the return statements are missing from this
 * extraction.
 */
2265 em_setup_receive_structures(struct adapter *adapter)
2267 bzero((void *) adapter->rx_desc_base,
2268 (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2270 if (em_allocate_receive_structures(adapter))
2273 /* Setup our descriptor pointers */
2274 adapter->next_rx_desc_to_check = 0;
2278 /*********************************************************************
2280 * Enable receive unit.
2282 **********************************************************************/
/*
 * em_initialize_receive_unit() -- program the hardware RX unit:
 * disables receives during setup, sets the RX interrupt delay
 * timers (RDTR, plus RADV and the ITR throttle on >= 82540),
 * programs the descriptor ring base/length and head/tail, builds
 * RCTL (broadcast accept, buffer size per rx_buffer_len, long-
 * packet enable for jumbo MTU, store-bad-packets when TBI
 * compatibility is on), enables RX checksum offload on >= 82543
 * when IFCAP_RXCSUM is set, and finally enables the receiver.
 * NOTE(review): declarations of reg_rctl/bus_addr/ifp, the switch
 * default case, and several break statements are missing from this
 * extraction.
 */
2284 em_initialize_receive_unit(struct adapter *adapter)
2287 uint32_t reg_rxcsum;
2291 INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2293 ifp = &adapter->interface_data.ac_if;
2295 /* Make sure receives are disabled while setting up the descriptor ring */
2296 E1000_WRITE_REG(&adapter->hw, RCTL, 0);
2298 /* Set the Receive Delay Timer Register */
2299 E1000_WRITE_REG(&adapter->hw, RDTR,
2300 adapter->rx_int_delay.value | E1000_RDT_FPDB);
2302 if(adapter->hw.mac_type >= em_82540) {
2303 E1000_WRITE_REG(&adapter->hw, RADV,
2304 adapter->rx_abs_int_delay.value);
2306 /* Set the interrupt throttling rate in 256ns increments */
2307 if (em_int_throttle_ceil) {
2308 E1000_WRITE_REG(&adapter->hw, ITR,
2309 1000000000 / 256 / em_int_throttle_ceil);
/* Throttling disabled: interrupt on every event. */
2311 E1000_WRITE_REG(&adapter->hw, ITR, 0);
2315 /* Setup the Base and Length of the Rx Descriptor Ring */
2316 bus_addr = adapter->rxdma.dma_paddr;
2317 E1000_WRITE_REG(&adapter->hw, RDBAL, (uint32_t)bus_addr);
2318 E1000_WRITE_REG(&adapter->hw, RDBAH, (uint32_t)(bus_addr >> 32));
2319 E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
2320 sizeof(struct em_rx_desc));
2322 /* Setup the HW Rx Head and Tail Descriptor Pointers */
2323 E1000_WRITE_REG(&adapter->hw, RDH, 0);
2324 E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
2326 /* Setup the Receive Control Register */
2327 reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2328 E1000_RCTL_RDMTS_HALF |
2329 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
2331 if (adapter->hw.tbi_compatibility_on == TRUE)
2332 reg_rctl |= E1000_RCTL_SBP;
/* Buffer-size bits; >2048 needs the BSEX size-extension bit. */
2334 switch (adapter->rx_buffer_len) {
2336 case EM_RXBUFFER_2048:
2337 reg_rctl |= E1000_RCTL_SZ_2048;
2339 case EM_RXBUFFER_4096:
2340 reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2342 case EM_RXBUFFER_8192:
2343 reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2345 case EM_RXBUFFER_16384:
2346 reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
/* Jumbo MTU requires long-packet reception regardless of buffer size. */
2350 if (ifp->if_mtu > ETHERMTU)
2351 reg_rctl |= E1000_RCTL_LPE;
2353 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
2354 if ((adapter->hw.mac_type >= em_82543) &&
2355 (ifp->if_capenable & IFCAP_RXCSUM)) {
2356 reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
2357 reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2358 E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
2361 /* Enable Receives */
2362 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2365 /*********************************************************************
2367 * Free receive related data structures.
2369 **********************************************************************/
2371 em_free_receive_structures(struct adapter *adapter)
2373 struct em_buffer *rx_buffer;
2376 INIT_DEBUGOUT("free_receive_structures: begin");
2378 if (adapter->rx_buffer_area != NULL) {
2379 rx_buffer = adapter->rx_buffer_area;
2380 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2381 if (rx_buffer->map != NULL) {
2382 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2383 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2385 if (rx_buffer->m_head != NULL)
2386 m_freem(rx_buffer->m_head);
2387 rx_buffer->m_head = NULL;
2390 if (adapter->rx_buffer_area != NULL) {
2391 free(adapter->rx_buffer_area, M_DEVBUF);
2392 adapter->rx_buffer_area = NULL;
2394 if (adapter->rxtag != NULL) {
2395 bus_dma_tag_destroy(adapter->rxtag);
2396 adapter->rxtag = NULL;
2400 /*********************************************************************
2402 * This routine executes in interrupt context. It replenishes
2403 * the mbufs in the descriptor and sends data which has been
2404 * dma'ed into host memory to upper layer.
2406 * We loop at most count times if count is > 0, or until done if
2409 *********************************************************************/
/*
 * NOTE(review): this chunk was extracted with the original line numbers
 * fused into the text and many lines missing (see gaps in the embedded
 * numbering: declarations, returns, else-branches, closing braces).
 * Code below is kept byte-identical; only comments were added.
 */
2411 em_process_receive_interrupts(struct adapter *adapter, int count)
2415 uint8_t accept_frame = 0;
2417 uint16_t len, desc_len, prev_len_adj;
2420 /* Pointer to the receive descriptor being examined. */
2421 struct em_rx_desc *current_desc;
2423 ifp = &adapter->interface_data.ac_if;
2424 i = adapter->next_rx_desc_to_check;
2425 current_desc = &adapter->rx_desc_base[i];
/* Nothing ready: count the empty poll (the early return is in a gap). */
2427 if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
2429 adapter->no_pkts_avail++;
/* Main loop: one completed descriptor per iteration, at most `count`. */
2433 while ((current_desc->status & E1000_RXD_STAT_DD) && (count != 0)) {
2434 mp = adapter->rx_buffer_area[i].m_head;
/* Make DMA'd packet data visible to the CPU before touching it. */
2435 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2436 BUS_DMASYNC_POSTREAD);
2440 desc_len = le16toh(current_desc->length);
/* EOP: last fragment of the packet — strip the trailing Ethernet CRC. */
2441 if (current_desc->status & E1000_RXD_STAT_EOP) {
/* CRC may straddle descriptors; remember how much to trim from the prior one. */
2444 if (desc_len < ETHER_CRC_LEN) {
2446 prev_len_adj = ETHER_CRC_LEN - desc_len;
2449 len = desc_len - ETHER_CRC_LEN;
/* Hardware flagged an error; TBI_ACCEPT may still keep it (SBP workaround). */
2456 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2458 uint32_t pkt_len = desc_len;
2460 if (adapter->fmp != NULL)
2461 pkt_len += adapter->fmp->m_pkthdr.len;
2463 last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
2465 if (TBI_ACCEPT(&adapter->hw, current_desc->status,
2466 current_desc->errors,
2467 pkt_len, last_byte)) {
2468 em_tbi_adjust_stats(&adapter->hw,
2471 adapter->hw.mac_addr);
/* Refill the slot; on ENOBUFS recycle the old mbuf and drop the packet. */
2481 if (em_get_buf(i, adapter, NULL, MB_DONTWAIT) == ENOBUFS) {
2482 adapter->dropped_pkts++;
2483 em_get_buf(i, adapter, mp, MB_DONTWAIT);
2484 if (adapter->fmp != NULL)
2485 m_freem(adapter->fmp);
2486 adapter->fmp = NULL;
2487 adapter->lmp = NULL;
2491 /* Assign correct length to the current fragment */
/* fmp = first mbuf of the packet chain, lmp = last; build the chain. */
2494 if (adapter->fmp == NULL) {
2495 mp->m_pkthdr.len = len;
2496 adapter->fmp = mp; /* Store the first mbuf */
2499 /* Chain mbuf's together */
2500 mp->m_flags &= ~M_PKTHDR;
2502 * Adjust length of previous mbuf in chain if we
2503 * received less than 4 bytes in the last descriptor.
2505 if (prev_len_adj > 0) {
2506 adapter->lmp->m_len -= prev_len_adj;
2507 adapter->fmp->m_pkthdr.len -= prev_len_adj;
2509 adapter->lmp->m_next = mp;
2510 adapter->lmp = adapter->lmp->m_next;
2511 adapter->fmp->m_pkthdr.len += len;
/* Complete frame: set csum flags/VLAN tag and hand it to the stack. */
2515 adapter->fmp->m_pkthdr.rcvif = ifp;
2518 em_receive_checksum(adapter, current_desc,
2520 if (current_desc->status & E1000_RXD_STAT_VP)
2521 VLAN_INPUT_TAG(adapter->fmp,
2522 (current_desc->special &
2523 E1000_RXD_SPC_VLAN_MASK));
2525 (*ifp->if_input)(ifp, adapter->fmp);
2526 adapter->fmp = NULL;
2527 adapter->lmp = NULL;
/* Rejected frame (accept_frame == 0 path): recycle mbuf, drop chain. */
2530 adapter->dropped_pkts++;
2531 em_get_buf(i, adapter, mp, MB_DONTWAIT);
2532 if (adapter->fmp != NULL)
2533 m_freem(adapter->fmp);
2534 adapter->fmp = NULL;
2535 adapter->lmp = NULL;
2538 /* Zero out the receive descriptors status */
2539 current_desc->status = 0;
2541 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
2542 E1000_WRITE_REG(&adapter->hw, RDT, i);
2544 /* Advance our pointers to the next descriptor */
/* Ring wrap-around back to descriptor 0. */
2545 if (++i == adapter->num_rx_desc) {
2547 current_desc = adapter->rx_desc_base;
2551 adapter->next_rx_desc_to_check = i;
2554 /*********************************************************************
2556 * Verify that the hardware indicated that the checksum is valid.
2557 * Inform the stack about the status of checksum so that stack
2558 * doesn't spend time verifying the checksum.
2560 *********************************************************************/
2562 em_receive_checksum(struct adapter *adapter,
2563 struct em_rx_desc *rx_desc,
2566 /* 82543 or newer only */
2567 if ((adapter->hw.mac_type < em_82543) ||
2568 /* Ignore Checksum bit is set */
2569 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
2570 mp->m_pkthdr.csum_flags = 0;
2574 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
2576 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
2577 /* IP Checksum Good */
2578 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2579 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2581 mp->m_pkthdr.csum_flags = 0;
2585 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
2587 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
2588 mp->m_pkthdr.csum_flags |=
2589 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2590 mp->m_pkthdr.csum_data = htons(0xffff);
2597 em_enable_vlans(struct adapter *adapter)
2601 E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
2603 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2604 ctrl |= E1000_CTRL_VME;
2605 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2609 em_enable_intr(struct adapter *adapter)
2611 E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
2615 em_disable_intr(struct adapter *adapter)
2617 E1000_WRITE_REG(&adapter->hw, IMC,
2618 (0xffffffff & ~E1000_IMC_RXSEQ));
2622 em_is_valid_ether_addr(uint8_t *addr)
2624 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
2626 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN)))
2633 em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
2635 pci_write_config(((struct em_osdep *)hw->back)->dev, reg, *value, 2);
2639 em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
2641 *value = pci_read_config(((struct em_osdep *)hw->back)->dev, reg, 2);
2645 em_pci_set_mwi(struct em_hw *hw)
2647 pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
2648 (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
2652 em_pci_clear_mwi(struct em_hw *hw)
2654 pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
2655 (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
2659 em_read_reg_io(struct em_hw *hw, uint32_t offset)
2661 bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 0, offset);
2662 return(bus_space_read_4(hw->reg_io_tag, hw->reg_io_handle, 4));
2666 em_write_reg_io(struct em_hw *hw, uint32_t offset, uint32_t value)
2668 bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 0, offset);
2669 bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 4, value);
2672 /*********************************************************************
2673 * 82544 Coexistence issue workaround.
2674 * There are 2 issues.
2675 * 1. Transmit Hang issue.
2676 * To detect this issue, following equation can be used...
2677 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
2678 * If SUM[3:0] is in between 1 to 4, we will have this issue.
2681 * To detect this issue, following equation can be used...
2682 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
2683 * If SUM[3:0] is in between 9 to c, we will have this issue.
2687 * Make sure we do not have ending address as 1,2,3,4(Hang) or 9,a,b,c(DAC)
2690 *************************************************************************/
2692 em_fill_descriptors(uint64_t address, uint32_t length, PDESC_ARRAY desc_array)
2694 /* Since issue is sensitive to length and address.*/
2695 /* Let us first check the address...*/
2696 uint32_t safe_terminator;
2698 desc_array->descriptor[0].address = address;
2699 desc_array->descriptor[0].length = length;
2700 desc_array->elements = 1;
2701 return(desc_array->elements);
2703 safe_terminator = (uint32_t)((((uint32_t)address & 0x7) + (length & 0xF)) & 0xF);
2704 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
2705 if (safe_terminator == 0 ||
2706 (safe_terminator > 4 && safe_terminator < 9) ||
2707 (safe_terminator > 0xC && safe_terminator <= 0xF)) {
2708 desc_array->descriptor[0].address = address;
2709 desc_array->descriptor[0].length = length;
2710 desc_array->elements = 1;
2711 return(desc_array->elements);
2714 desc_array->descriptor[0].address = address;
2715 desc_array->descriptor[0].length = length - 4;
2716 desc_array->descriptor[1].address = address + (length - 4);
2717 desc_array->descriptor[1].length = 4;
2718 desc_array->elements = 2;
2719 return(desc_array->elements);
2722 /**********************************************************************
2724 * Update the board statistics counters.
2726 **********************************************************************/
2728 em_update_stats_counters(struct adapter *adapter)
2732 if (adapter->hw.media_type == em_media_type_copper ||
2733 (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
2734 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
2735 adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
2737 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
2738 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
2739 adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
2740 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);
2742 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
2743 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
2744 adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
2745 adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
2746 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
2747 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
2748 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
2749 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
2750 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
2751 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
2752 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
2753 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
2754 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
2755 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
2756 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
2757 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
2758 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
2759 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
2760 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
2761 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);
2763 /* For the 64-bit byte counters the low dword must be read first. */
2764 /* Both registers clear on the read of the high dword */
2766 adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
2767 adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
2768 adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
2769 adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);
2771 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
2772 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
2773 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
2774 adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
2775 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);
2777 adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
2778 adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
2779 adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
2780 adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);
2782 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
2783 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
2784 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
2785 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
2786 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
2787 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
2788 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
2789 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
2790 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
2791 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);
2793 if (adapter->hw.mac_type >= em_82543) {
2794 adapter->stats.algnerrc +=
2795 E1000_READ_REG(&adapter->hw, ALGNERRC);
2796 adapter->stats.rxerrc +=
2797 E1000_READ_REG(&adapter->hw, RXERRC);
2798 adapter->stats.tncrs +=
2799 E1000_READ_REG(&adapter->hw, TNCRS);
2800 adapter->stats.cexterr +=
2801 E1000_READ_REG(&adapter->hw, CEXTERR);
2802 adapter->stats.tsctc +=
2803 E1000_READ_REG(&adapter->hw, TSCTC);
2804 adapter->stats.tsctfc +=
2805 E1000_READ_REG(&adapter->hw, TSCTFC);
2807 ifp = &adapter->interface_data.ac_if;
2809 /* Fill out the OS statistics structure */
2810 ifp->if_ibytes = adapter->stats.gorcl;
2811 ifp->if_obytes = adapter->stats.gotcl;
2812 ifp->if_imcasts = adapter->stats.mprc;
2813 ifp->if_collisions = adapter->stats.colc;
2816 ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
2817 adapter->stats.crcerrs + adapter->stats.algnerrc +
2818 adapter->stats.rlec + adapter->stats.rnbc +
2819 adapter->stats.mpc + adapter->stats.cexterr;
2822 ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol;
2826 /**********************************************************************
2828 * This routine is called only when em_display_debug_stats is enabled.
2829 * This routine provides a way to take a look at important statistics
2830 * maintained by the driver and hardware.
2832 **********************************************************************/
2834 em_print_debug_info(struct adapter *adapter)
2836 device_t dev= adapter->dev;
2837 uint8_t *hw_addr = adapter->hw.hw_addr;
2839 device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
2840 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
2841 E1000_READ_REG(&adapter->hw, TIDV),
2842 E1000_READ_REG(&adapter->hw, TADV));
2843 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
2844 E1000_READ_REG(&adapter->hw, RDTR),
2845 E1000_READ_REG(&adapter->hw, RADV));
2847 device_printf(dev, "Packets not Avail = %ld\n", adapter->no_pkts_avail);
2848 device_printf(dev, "CleanTxInterrupts = %ld\n",
2849 adapter->clean_tx_interrupts);
2851 device_printf(dev, "fifo workaround = %lld, fifo_reset = %lld\n",
2852 (long long)adapter->tx_fifo_wrk,
2853 (long long)adapter->tx_fifo_reset);
2854 device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
2855 E1000_READ_REG(&adapter->hw, TDH),
2856 E1000_READ_REG(&adapter->hw, TDT));
2857 device_printf(dev, "Num Tx descriptors avail = %d\n",
2858 adapter->num_tx_desc_avail);
2859 device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
2860 adapter->no_tx_desc_avail1);
2861 device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
2862 adapter->no_tx_desc_avail2);
2863 device_printf(dev, "Std mbuf failed = %ld\n",
2864 adapter->mbuf_alloc_failed);
2865 device_printf(dev, "Std mbuf cluster failed = %ld\n",
2866 adapter->mbuf_cluster_failed);
2867 device_printf(dev, "Driver dropped packets = %ld\n",
2868 adapter->dropped_pkts);
2872 em_print_hw_stats(struct adapter *adapter)
2874 device_t dev= adapter->dev;
2876 device_printf(dev, "Adapter: %p\n", adapter);
2878 device_printf(dev, "Excessive collisions = %lld\n",
2879 (long long)adapter->stats.ecol);
2880 device_printf(dev, "Symbol errors = %lld\n",
2881 (long long)adapter->stats.symerrs);
2882 device_printf(dev, "Sequence errors = %lld\n",
2883 (long long)adapter->stats.sec);
2884 device_printf(dev, "Defer count = %lld\n",
2885 (long long)adapter->stats.dc);
2887 device_printf(dev, "Missed Packets = %lld\n",
2888 (long long)adapter->stats.mpc);
2889 device_printf(dev, "Receive No Buffers = %lld\n",
2890 (long long)adapter->stats.rnbc);
2891 device_printf(dev, "Receive length errors = %lld\n",
2892 (long long)adapter->stats.rlec);
2893 device_printf(dev, "Receive errors = %lld\n",
2894 (long long)adapter->stats.rxerrc);
2895 device_printf(dev, "Crc errors = %lld\n",
2896 (long long)adapter->stats.crcerrs);
2897 device_printf(dev, "Alignment errors = %lld\n",
2898 (long long)adapter->stats.algnerrc);
2899 device_printf(dev, "Carrier extension errors = %lld\n",
2900 (long long)adapter->stats.cexterr);
2902 device_printf(dev, "XON Rcvd = %lld\n",
2903 (long long)adapter->stats.xonrxc);
2904 device_printf(dev, "XON Xmtd = %lld\n",
2905 (long long)adapter->stats.xontxc);
2906 device_printf(dev, "XOFF Rcvd = %lld\n",
2907 (long long)adapter->stats.xoffrxc);
2908 device_printf(dev, "XOFF Xmtd = %lld\n",
2909 (long long)adapter->stats.xofftxc);
2911 device_printf(dev, "Good Packets Rcvd = %lld\n",
2912 (long long)adapter->stats.gprc);
2913 device_printf(dev, "Good Packets Xmtd = %lld\n",
2914 (long long)adapter->stats.gptc);
2918 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
2922 struct adapter *adapter;
2925 error = sysctl_handle_int(oidp, &result, 0, req);
2927 if (error || !req->newptr)
2931 adapter = (struct adapter *)arg1;
2932 em_print_debug_info(adapter);
2939 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
2943 struct adapter *adapter;
2946 error = sysctl_handle_int(oidp, &result, 0, req);
2948 if (error || !req->newptr)
2952 adapter = (struct adapter *)arg1;
2953 em_print_hw_stats(adapter);
/*
 * Sysctl handler: converts a user-supplied interrupt-delay value in
 * microseconds into device ticks and writes it into the register named by
 * info->offset. NOTE(review): this extract lost several lines (variable
 * declarations, returns, the lock/critical section around the register
 * write, and switch braces) — code kept byte-identical, comments only.
 */
2960 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
2962 struct em_int_delay_info *info;
2963 struct adapter *adapter;
/* arg1 carries the em_int_delay_info for this sysctl node. */
2970 info = (struct em_int_delay_info *)arg1;
2971 adapter = info->adapter;
2972 usecs = info->value;
2973 error = sysctl_handle_int(oidp, &usecs, 0, req);
/* Read-only access or error: do not touch the hardware. */
2974 if (error != 0 || req->newptr == NULL)
/* 65535 ticks is the widest delay the 16-bit register field can hold. */
2976 if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
2978 info->value = usecs;
2979 ticks = E1000_USECS_TO_TICKS(usecs);
/* Only the low 16 bits of the register carry the delay value. */
2982 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
2983 regval = (regval & ~0xffff) | (ticks & 0xffff);
2984 /* Handle a few special cases. */
2985 switch (info->offset) {
2987 case E1000_82542_RDTR:
2988 regval |= E1000_RDT_FPDB;
2991 case E1000_82542_TIDV:
/* ticks == 0 branch (condition is among the missing lines): disable TX delay. */
2993 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
2994 /* Don't write 0 into the TIDV register. */
2997 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3000 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
3006 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
3007 const char *description, struct em_int_delay_info *info,
3008 int offset, int value)
3010 info->adapter = adapter;
3011 info->offset = offset;
3012 info->value = value;
3013 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
3014 SYSCTL_CHILDREN(adapter->sysctl_tree),
3015 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
3016 info, 0, em_sysctl_int_delay, "I", description);
3020 em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS)
3022 struct adapter *adapter = (void *)arg1;
3026 throttle = em_int_throttle_ceil;
3027 error = sysctl_handle_int(oidp, &throttle, 0, req);
3028 if (error || req->newptr == NULL)
3030 if (throttle < 0 || throttle > 1000000000 / 256)
3034 * Set the interrupt throttling rate in 256ns increments,
3035 * recalculate sysctl value assignment to get exact frequency.
3037 throttle = 1000000000 / 256 / throttle;
3038 em_int_throttle_ceil = 1000000000 / 256 / throttle;
3040 E1000_WRITE_REG(&adapter->hw, ITR, throttle);
3043 em_int_throttle_ceil = 0;
3045 E1000_WRITE_REG(&adapter->hw, ITR, 0);
3048 device_printf(adapter->dev, "Interrupt moderation set to %d/sec\n",
3049 em_int_throttle_ceil);